Merge "Parse audio config codec capability for LE devices"
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index a70cc4b..0e9740a 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2165,7 +2165,7 @@
      *
      * <p>If this value is greater than 1, then the device supports controlling the
      * flashlight brightness level via
-     * {android.hardware.camera2.CameraManager#setTorchStrengthLevel}.
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.
      * If this value is equal to 1, flashlight brightness control is not supported.
      * The value for this key will be null for devices with no flash unit.</p>
      */
@@ -2173,7 +2173,7 @@
             ACAMERA_FLASH_INFO_START + 2,
     /**
      * <p>Default flashlight brightness level to be set via
-     * {android.hardware.camera2.CameraManager#setTorchStrengthLevel}.</p>
+     * {android.hardware.camera2.CameraManager#turnOnTorchWithStrengthLevel}.</p>
      *
      * <p>Type: int32</p>
      *
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 726a6be..e3db5b4 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -82,10 +82,8 @@
         IPCThreadState::self()->joinThreadPool();
         for (;;) {
             siginfo_t info;
-            int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
-            if (ret == EINTR) {
-                continue;
-            }
+            int ret = TEMP_FAILURE_RETRY(waitid(P_PID, childPid, &info,
+                                                WEXITED | WSTOPPED | WCONTINUED));
             if (ret < 0) {
                 break;
             }
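
The replaced loop compared waitid()'s return value against EINTR, but waitid() reports interruption by returning -1 and setting errno to EINTR, so that retry branch could never fire. TEMP_FAILURE_RETRY retries on exactly that condition; a minimal sketch of its semantics (assumed here, not necessarily bionic's verbatim definition) for readers unfamiliar with the macro:

#include <errno.h>

// Re-evaluate the expression while it fails with errno == EINTR, and yield the
// final return value so the caller's "ret < 0" check only sees real failures.
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp)                  \
    ({                                           \
        __typeof__(exp) _rc;                     \
        do {                                     \
            _rc = (exp);                         \
        } while (_rc == -1 && errno == EINTR);   \
        _rc;                                     \
    })
#endif
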
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 7486d27..617769b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -31,6 +31,255 @@
 
 namespace android {
 
+C2SoftVpxEnc::IntfImpl::IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+    : SimpleInterface<void>::BaseParams(
+            helper,
+            COMPONENT_NAME,
+            C2Component::KIND_ENCODER,
+            C2Component::DOMAIN_VIDEO,
+            MEDIA_MIMETYPE_VIDEO) {
+    noPrivateBuffers(); // TODO: account for our buffers here
+    noInputReferences();
+    noOutputReferences();
+    noInputLatency();
+    noTimeStretch();
+    setDerivedInstance(this);
+
+    addParameter(
+            DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
+            .withConstValue(new C2ComponentAttributesSetting(
+                C2Component::ATTRIB_IS_TEMPORAL))
+            .build());
+
+    addParameter(
+            DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+            .withConstValue(new C2StreamUsageTuning::input(
+                    0u, (uint64_t)C2MemoryUsage::CPU_READ))
+            .build());
+
+    addParameter(
+        DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+            .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+            .withFields({
+                C2F(mSize, width).inRange(2, 2048, 2),
+                C2F(mSize, height).inRange(2, 2048, 2),
+            })
+            .withSetter(SizeSetter)
+            .build());
+
+    addParameter(
+        DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
+            .withDefault(new C2StreamBitrateModeTuning::output(
+                    0u, C2Config::BITRATE_VARIABLE))
+            .withFields({
+                C2F(mBitrateMode, value).oneOf({
+                    C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
+            })
+            .withSetter(
+                Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
+            .build());
+
+    addParameter(
+        DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
+            .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+            // TODO: More restriction?
+            .withFields({C2F(mFrameRate, value).greaterThan(0.)})
+            .withSetter(
+                Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
+            .build());
+
+    addParameter(
+        DefineParam(mLayering, C2_PARAMKEY_TEMPORAL_LAYERING)
+            .withDefault(C2StreamTemporalLayeringTuning::output::AllocShared(0u, 0, 0, 0))
+            .withFields({
+                C2F(mLayering, m.layerCount).inRange(0, 4),
+                C2F(mLayering, m.bLayerCount).inRange(0, 0),
+                C2F(mLayering, m.bitrateRatios).inRange(0., 1.)
+            })
+            .withSetter(LayeringSetter)
+            .build());
+
+    addParameter(
+            DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
+            .withDefault(new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
+            .withFields({C2F(mSyncFramePeriod, value).any()})
+            .withSetter(Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
+            .build());
+
+    addParameter(
+        DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+            .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
+            .withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
+            .withSetter(BitrateSetter)
+            .build());
+
+    addParameter(
+            DefineParam(mIntraRefresh, C2_PARAMKEY_INTRA_REFRESH)
+            .withConstValue(new C2StreamIntraRefreshTuning::output(
+                            0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
+            .build());
+#ifdef VP9
+    addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+            .withDefault(new C2StreamProfileLevelInfo::output(
+                    0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
+            .withFields({
+                C2F(mProfileLevel, profile).equalTo(
+                    PROFILE_VP9_0
+                ),
+                C2F(mProfileLevel, level).equalTo(
+                    LEVEL_VP9_4_1),
+            })
+            .withSetter(ProfileLevelSetter)
+            .build());
+#else
+    addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+            .withDefault(new C2StreamProfileLevelInfo::output(
+                    0u, PROFILE_VP8_0, LEVEL_UNUSED))
+            .withFields({
+                C2F(mProfileLevel, profile).equalTo(
+                    PROFILE_VP8_0
+                ),
+                C2F(mProfileLevel, level).equalTo(
+                    LEVEL_UNUSED),
+            })
+            .withSetter(ProfileLevelSetter)
+            .build());
+#endif
+    addParameter(
+            DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
+            .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
+            .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
+            .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
+            .build());
+
+    addParameter(
+            DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
+            .withDefault(new C2StreamColorAspectsInfo::input(
+                    0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+            .withFields({
+                C2F(mColorAspects, range).inRange(
+                            C2Color::RANGE_UNSPECIFIED,     C2Color::RANGE_OTHER),
+                C2F(mColorAspects, primaries).inRange(
+                            C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+                C2F(mColorAspects, transfer).inRange(
+                            C2Color::TRANSFER_UNSPECIFIED,  C2Color::TRANSFER_OTHER),
+                C2F(mColorAspects, matrix).inRange(
+                            C2Color::MATRIX_UNSPECIFIED,    C2Color::MATRIX_OTHER)
+            })
+            .withSetter(ColorAspectsSetter)
+            .build());
+
+    addParameter(
+            DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
+            .withDefault(new C2StreamColorAspectsInfo::output(
+                    0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+            .withFields({
+                C2F(mCodedColorAspects, range).inRange(
+                            C2Color::RANGE_UNSPECIFIED,     C2Color::RANGE_OTHER),
+                C2F(mCodedColorAspects, primaries).inRange(
+                            C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+                C2F(mCodedColorAspects, transfer).inRange(
+                            C2Color::TRANSFER_UNSPECIFIED,  C2Color::TRANSFER_OTHER),
+                C2F(mCodedColorAspects, matrix).inRange(
+                            C2Color::MATRIX_UNSPECIFIED,    C2Color::MATRIX_OTHER)
+            })
+            .withSetter(CodedColorAspectsSetter, mColorAspects)
+            .build());
+}
+
+C2R C2SoftVpxEnc::IntfImpl::BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me) {
+    (void)mayBlock;
+    C2R res = C2R::Ok();
+    if (me.v.value < 4096) {
+        me.set().value = 4096;
+    }
+    return res;
+}
+
+C2R C2SoftVpxEnc::IntfImpl::SizeSetter(bool mayBlock,
+                                       const C2P<C2StreamPictureSizeInfo::input>& oldMe,
+                                       C2P<C2StreamPictureSizeInfo::input>& me) {
+    (void)mayBlock;
+    C2R res = C2R::Ok();
+    if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+        res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+        me.set().width = oldMe.v.width;
+    }
+    if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+        res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+        me.set().height = oldMe.v.height;
+    }
+    return res;
+}
+
+C2R C2SoftVpxEnc::IntfImpl::ProfileLevelSetter(bool mayBlock,
+                                               C2P<C2StreamProfileLevelInfo::output>& me) {
+    (void)mayBlock;
+    if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+        me.set().profile = PROFILE_VP9_0;
+    }
+    if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
+        me.set().level = LEVEL_VP9_4_1;
+    }
+    return C2R::Ok();
+}
+
+C2R C2SoftVpxEnc::IntfImpl::LayeringSetter(bool mayBlock,
+                                           C2P<C2StreamTemporalLayeringTuning::output>& me) {
+    (void)mayBlock;
+    C2R res = C2R::Ok();
+    if (me.v.m.layerCount > 4) {
+        me.set().m.layerCount = 4;
+    }
+    me.set().m.bLayerCount = 0;
+    // ensure ratios are monotonic and clamped between 0 and 1
+    for (size_t ix = 0; ix < me.v.flexCount(); ++ix) {
+        me.set().m.bitrateRatios[ix] = c2_clamp(
+            ix > 0 ? me.v.m.bitrateRatios[ix - 1] : 0, me.v.m.bitrateRatios[ix], 1.);
+    }
+    ALOGI("setting temporal layering %u + %u", me.v.m.layerCount, me.v.m.bLayerCount);
+    return res;
+}
+
+uint32_t C2SoftVpxEnc::IntfImpl::getSyncFramePeriod() const {
+    if (mSyncFramePeriod->value < 0 || mSyncFramePeriod->value == INT64_MAX) {
+        return 0;
+    }
+    double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
+    return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+}
+C2R C2SoftVpxEnc::IntfImpl::ColorAspectsSetter(bool mayBlock,
+                                               C2P<C2StreamColorAspectsInfo::input>& me) {
+    (void)mayBlock;
+    if (me.v.range > C2Color::RANGE_OTHER) {
+            me.set().range = C2Color::RANGE_OTHER;
+    }
+    if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+            me.set().primaries = C2Color::PRIMARIES_OTHER;
+    }
+    if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+            me.set().transfer = C2Color::TRANSFER_OTHER;
+    }
+    if (me.v.matrix > C2Color::MATRIX_OTHER) {
+            me.set().matrix = C2Color::MATRIX_OTHER;
+    }
+    return C2R::Ok();
+}
+C2R C2SoftVpxEnc::IntfImpl::CodedColorAspectsSetter(
+        bool mayBlock, C2P<C2StreamColorAspectsInfo::output>& me,
+        const C2P<C2StreamColorAspectsInfo::input>& coded) {
+    (void)mayBlock;
+    me.set().range = coded.v.range;
+    me.set().primaries = coded.v.primaries;
+    me.set().transfer = coded.v.transfer;
+    me.set().matrix = coded.v.matrix;
+    return C2R::Ok();
+}
+
 #if 0
 static size_t getCpuCoreCount() {
     long cpuCoreCount = 1;
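
The relocated getSyncFramePeriod() converts the configured sync-frame interval (microseconds) into a key-frame period measured in frames: interval / 1e6 * frameRate, rounded to nearest and clamped to [1, UINT32_MAX], with a negative or INT64_MAX interval yielding 0. A hedged worked example using the defaults declared above (1,000,000 us interval, 30 fps); the standalone helper name and std::min/std::max stand in for the c2_* helpers and are illustrative only:

#include <algorithm>
#include <cstdint>

static uint32_t syncFramePeriodInFrames(int64_t intervalUs, float frameRate) {
    if (intervalUs < 0 || intervalUs == INT64_MAX) {
        return 0;  // treated downstream as "no periodic sync frames" (assumption)
    }
    double period = intervalUs / 1e6 * frameRate;  // microseconds -> frames
    return (uint32_t)std::max(std::min(period + 0.5, (double)UINT32_MAX), 1.);
}

// syncFramePeriodInFrames(1000000, 30.f) == 30: one key frame every 30 frames.
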
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 926b2e9..e296c8f 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -237,259 +237,38 @@
 
 class C2SoftVpxEnc::IntfImpl : public SimpleInterface<void>::BaseParams {
    public:
-    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
-        : SimpleInterface<void>::BaseParams(
-                helper,
-                COMPONENT_NAME,
-                C2Component::KIND_ENCODER,
-                C2Component::DOMAIN_VIDEO,
-                MEDIA_MIMETYPE_VIDEO) {
-        noPrivateBuffers(); // TODO: account for our buffers here
-        noInputReferences();
-        noOutputReferences();
-        noInputLatency();
-        noTimeStretch();
-        setDerivedInstance(this);
-
-        addParameter(
-                DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
-                .withConstValue(new C2ComponentAttributesSetting(
-                    C2Component::ATTRIB_IS_TEMPORAL))
-                .build());
-
-        addParameter(
-                DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
-                .withConstValue(new C2StreamUsageTuning::input(
-                        0u, (uint64_t)C2MemoryUsage::CPU_READ))
-                .build());
-
-        addParameter(
-            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
-                .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
-                .withFields({
-                    C2F(mSize, width).inRange(2, 2048, 2),
-                    C2F(mSize, height).inRange(2, 2048, 2),
-                })
-                .withSetter(SizeSetter)
-                .build());
-
-        addParameter(
-            DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
-                .withDefault(new C2StreamBitrateModeTuning::output(
-                        0u, C2Config::BITRATE_VARIABLE))
-                .withFields({
-                    C2F(mBitrateMode, value).oneOf({
-                        C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
-                })
-                .withSetter(
-                    Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
-                .build());
-
-        addParameter(
-            DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
-                .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
-                // TODO: More restriction?
-                .withFields({C2F(mFrameRate, value).greaterThan(0.)})
-                .withSetter(
-                    Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
-                .build());
-
-        addParameter(
-            DefineParam(mLayering, C2_PARAMKEY_TEMPORAL_LAYERING)
-                .withDefault(C2StreamTemporalLayeringTuning::output::AllocShared(0u, 0, 0, 0))
-                .withFields({
-                    C2F(mLayering, m.layerCount).inRange(0, 4),
-                    C2F(mLayering, m.bLayerCount).inRange(0, 0),
-                    C2F(mLayering, m.bitrateRatios).inRange(0., 1.)
-                })
-                .withSetter(LayeringSetter)
-                .build());
-
-        addParameter(
-                DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
-                .withDefault(new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
-                .withFields({C2F(mSyncFramePeriod, value).any()})
-                .withSetter(Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
-                .build());
-
-        addParameter(
-            DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
-                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
-                .withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
-                .withSetter(BitrateSetter)
-                .build());
-
-        addParameter(
-                DefineParam(mIntraRefresh, C2_PARAMKEY_INTRA_REFRESH)
-                .withConstValue(new C2StreamIntraRefreshTuning::output(
-                             0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
-                .build());
-#ifdef VP9
-        addParameter(
-                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
-                .withDefault(new C2StreamProfileLevelInfo::output(
-                        0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
-                .withFields({
-                    C2F(mProfileLevel, profile).equalTo(
-                        PROFILE_VP9_0
-                    ),
-                    C2F(mProfileLevel, level).equalTo(
-                        LEVEL_VP9_4_1),
-                })
-                .withSetter(ProfileLevelSetter)
-                .build());
-#else
-        addParameter(
-                DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
-                .withDefault(new C2StreamProfileLevelInfo::output(
-                        0u, PROFILE_VP8_0, LEVEL_UNUSED))
-                .withFields({
-                    C2F(mProfileLevel, profile).equalTo(
-                        PROFILE_VP8_0
-                    ),
-                    C2F(mProfileLevel, level).equalTo(
-                        LEVEL_UNUSED),
-                })
-                .withSetter(ProfileLevelSetter)
-                .build());
-#endif
-        addParameter(
-                DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
-                .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
-                .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
-                .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
-                .build());
-
-        addParameter(
-                DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
-                .withDefault(new C2StreamColorAspectsInfo::input(
-                        0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
-                        C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
-                .withFields({
-                    C2F(mColorAspects, range).inRange(
-                                C2Color::RANGE_UNSPECIFIED,     C2Color::RANGE_OTHER),
-                    C2F(mColorAspects, primaries).inRange(
-                                C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
-                    C2F(mColorAspects, transfer).inRange(
-                                C2Color::TRANSFER_UNSPECIFIED,  C2Color::TRANSFER_OTHER),
-                    C2F(mColorAspects, matrix).inRange(
-                                C2Color::MATRIX_UNSPECIFIED,    C2Color::MATRIX_OTHER)
-                })
-                .withSetter(ColorAspectsSetter)
-                .build());
-
-        addParameter(
-                DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
-                .withDefault(new C2StreamColorAspectsInfo::output(
-                        0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
-                        C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
-                .withFields({
-                    C2F(mCodedColorAspects, range).inRange(
-                                C2Color::RANGE_UNSPECIFIED,     C2Color::RANGE_OTHER),
-                    C2F(mCodedColorAspects, primaries).inRange(
-                                C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
-                    C2F(mCodedColorAspects, transfer).inRange(
-                                C2Color::TRANSFER_UNSPECIFIED,  C2Color::TRANSFER_OTHER),
-                    C2F(mCodedColorAspects, matrix).inRange(
-                                C2Color::MATRIX_UNSPECIFIED,    C2Color::MATRIX_OTHER)
-                })
-                .withSetter(CodedColorAspectsSetter, mColorAspects)
-                .build());
-    }
-
-    static C2R BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me) {
-        (void)mayBlock;
-        C2R res = C2R::Ok();
-        if (me.v.value <= 4096) {
-            me.set().value = 4096;
-        }
-        return res;
-    }
+    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper);
+    static C2R BitrateSetter(bool mayBlock, C2P<C2StreamBitrateInfo::output> &me);
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::input> &oldMe,
-                          C2P<C2StreamPictureSizeInfo::input> &me) {
-        (void)mayBlock;
-        C2R res = C2R::Ok();
-        if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
-            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
-            me.set().width = oldMe.v.width;
-        }
-        if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
-            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
-            me.set().height = oldMe.v.height;
-        }
-        return res;
-    }
+                          C2P<C2StreamPictureSizeInfo::input> &me);
 
     static C2R ProfileLevelSetter(
             bool mayBlock,
-            C2P<C2StreamProfileLevelInfo::output> &me) {
-        (void)mayBlock;
-        if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
-            me.set().profile = PROFILE_VP9_0;
-        }
-        if (!me.F(me.v.level).supportsAtAll(me.v.level)) {
-            me.set().level = LEVEL_VP9_4_1;
-        }
-        return C2R::Ok();
-    }
+            C2P<C2StreamProfileLevelInfo::output> &me);
 
-    static C2R LayeringSetter(bool mayBlock, C2P<C2StreamTemporalLayeringTuning::output>& me) {
-        (void)mayBlock;
-        C2R res = C2R::Ok();
-        if (me.v.m.layerCount > 4) {
-            me.set().m.layerCount = 4;
-        }
-        me.set().m.bLayerCount = 0;
-        // ensure ratios are monotonic and clamped between 0 and 1
-        for (size_t ix = 0; ix < me.v.flexCount(); ++ix) {
-            me.set().m.bitrateRatios[ix] = c2_clamp(
-                ix > 0 ? me.v.m.bitrateRatios[ix - 1] : 0, me.v.m.bitrateRatios[ix], 1.);
-        }
-        ALOGI("setting temporal layering %u + %u", me.v.m.layerCount, me.v.m.bLayerCount);
-        return res;
-    }
+    static C2R LayeringSetter(bool mayBlock, C2P<C2StreamTemporalLayeringTuning::output>& me);
 
     // unsafe getters
     std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const { return mSize; }
-    std::shared_ptr<C2StreamIntraRefreshTuning::output> getIntraRefresh_l() const { return mIntraRefresh; }
+    std::shared_ptr<C2StreamIntraRefreshTuning::output> getIntraRefresh_l() const {
+        return mIntraRefresh;
+    }
     std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const { return mFrameRate; }
     std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const { return mBitrate; }
-    std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const { return mBitrateMode; }
-    std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
-    std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const { return mLayering; }
-    uint32_t getSyncFramePeriod() const {
-        if (mSyncFramePeriod->value < 0 || mSyncFramePeriod->value == INT64_MAX) {
-            return 0;
-        }
-        double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
-        return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+    std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const {
+        return mBitrateMode;
     }
-    static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me) {
-        (void)mayBlock;
-        if (me.v.range > C2Color::RANGE_OTHER) {
-                me.set().range = C2Color::RANGE_OTHER;
-        }
-        if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
-                me.set().primaries = C2Color::PRIMARIES_OTHER;
-        }
-        if (me.v.transfer > C2Color::TRANSFER_OTHER) {
-                me.set().transfer = C2Color::TRANSFER_OTHER;
-        }
-        if (me.v.matrix > C2Color::MATRIX_OTHER) {
-                me.set().matrix = C2Color::MATRIX_OTHER;
-        }
-        return C2R::Ok();
+    std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
+        return mRequestSync;
     }
+    std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const {
+        return mLayering;
+    }
+    uint32_t getSyncFramePeriod() const;
+    static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me);
     static C2R CodedColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::output> &me,
-                                       const C2P<C2StreamColorAspectsInfo::input> &coded) {
-        (void)mayBlock;
-        me.set().range = coded.v.range;
-        me.set().primaries = coded.v.primaries;
-        me.set().transfer = coded.v.transfer;
-        me.set().matrix = coded.v.matrix;
-        return C2R::Ok();
-    }
+                                       const C2P<C2StreamColorAspectsInfo::input> &coded);
 
    private:
     std::shared_ptr<C2StreamUsageTuning::input> mUsage;
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 2cc7ab7..feaa98c 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -1673,7 +1673,7 @@
     SYNC_FRAME = (1 << 0),  ///< sync frame, e.g. IDR
     I_FRAME    = (1 << 1),  ///< intra frame that is completely encoded
     P_FRAME    = (1 << 2),  ///< inter predicted frame from previous frames
-    B_FRAME    = (1 << 3),  ///< backward predicted (out-of-order) frame
+    B_FRAME    = (1 << 3),  ///< bidirectional predicted (out-of-order) frame
 )
 
 /**
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 7c4bfb6..67d7ed2 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -96,9 +96,12 @@
         return false;
     }
 
-    // determine if codec supports HDR
+    // determine if codec supports HDR; implies 10-bit support
     bool supportsHdr = false;
+    // determine if codec supports HDR10Plus; implies 10-bit support
     bool supportsHdr10Plus = false;
+    // determine if codec supports 10-bit format
+    bool supports10Bit = false;
 
     std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
     c2_status_t err1 = intf->querySupportedParams(&paramDescs);
@@ -126,6 +129,10 @@
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
 
+    // HDR support implies 10-bit support.
+    // TODO: directly check this from the component interface
+    supports10Bit = (supportsHdr || supportsHdr10Plus);
+
     bool added = false;
 
     for (C2Value::Primitive profile : profileQuery[0].values.values) {
@@ -165,6 +172,12 @@
                     }
                 }
             }
+            if (supports10Bit) {
+                auto bitnessMapper = C2Mapper::GetBitDepthProfileLevelMapper(trait.mediaType, 10);
+                if (bitnessMapper && bitnessMapper->mapProfile(pl.profile, &sdkProfile)) {
+                    caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+                }
+            }
         } else if (!mapper) {
             caps->addProfileLevel(pl.profile, pl.level);
         }
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 4d939fa..ca6a328 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -381,15 +381,17 @@
     { C2Config::LEVEL_AV1_7_3,  AV1Level73 },
 };
 
-
 ALookup<C2Config::profile_t, int32_t> sAv1Profiles = {
-    // TODO: will need to disambiguate between Main8 and Main10
     { C2Config::PROFILE_AV1_0, AV1ProfileMain8 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
 };
 
+ALookup<C2Config::profile_t, int32_t> sAv1TenbitProfiles = {
+    { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
+};
+
 ALookup<C2Config::profile_t, int32_t> sAv1HdrProfiles = {
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
 };
@@ -603,9 +605,9 @@
 };
 
 struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
-    Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false) :
+    Av1ProfileLevelMapper(bool isHdr = false, bool isHdr10Plus = false, int32_t bitDepth = 8) :
         ProfileLevelMapperHelper(),
-        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus) {}
+        mIsHdr(isHdr), mIsHdr10Plus(isHdr10Plus), mBitDepth(bitDepth) {}
 
     virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
         return sAv1Levels.map(from, to);
@@ -614,19 +616,22 @@
         return sAv1Levels.map(from, to);
     }
     virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
-        return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
-                     mIsHdr ? sAv1HdrProfiles.map(from, to) :
-                              sAv1Profiles.map(from, to);
+        return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+                    mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+                          mIsHdr ? sAv1HdrProfiles.map(from, to) :
+                                   sAv1Profiles.map(from, to);
     }
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
-        return mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
-                     mIsHdr ? sAv1HdrProfiles.map(from, to) :
-                              sAv1Profiles.map(from, to);
+        return (mBitDepth == 10) ? sAv1TenbitProfiles.map(from, to) :
+                    mIsHdr10Plus ? sAv1Hdr10PlusProfiles.map(from, to) :
+                          mIsHdr ? sAv1HdrProfiles.map(from, to) :
+                                   sAv1Profiles.map(from, to);
     }
 
 private:
     bool mIsHdr;
     bool mIsHdr10Plus;
+    int32_t mBitDepth;
 };
 
 } // namespace
@@ -674,6 +679,18 @@
 }
 
 // static
+std::shared_ptr<C2Mapper::ProfileLevelMapper>
+C2Mapper::GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth) {
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
+    if (bitDepth == 8) {
+        return GetProfileLevelMapper(mediaType);
+    } else if (mediaType == MIMETYPE_VIDEO_AV1 && bitDepth == 10) {
+        return std::make_shared<Av1ProfileLevelMapper>(false, false, bitDepth);
+    }
+    return nullptr;
+}
+
+// static
 bool C2Mapper::map(C2Config::bitrate_mode_t from, int32_t *to) {
     return sBitrateModes.map(from, to);
 }
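
A hedged usage sketch of the new 10-bit mapper, mirroring the Codec2InfoBuilder call site above; the local variable names are illustrative and the includes are omitted:

std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
        C2Mapper::GetBitDepthProfileLevelMapper(MIMETYPE_VIDEO_AV1, 10 /* bitDepth */);
int32_t sdkProfile = 0;
if (mapper && mapper->mapProfile(C2Config::PROFILE_AV1_0, &sdkProfile)) {
    // With sAv1TenbitProfiles, sdkProfile is expected to come back as AV1ProfileMain10.
}

Note that an 8-bit request simply falls back to GetProfileLevelMapper(), and any other media type / bit depth combination returns nullptr, so callers must null-check as above.
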
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
index 797c8a8..33d305e 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.h
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -43,6 +43,9 @@
         static std::shared_ptr<ProfileLevelMapper>
         GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus = false);
 
+        static std::shared_ptr<ProfileLevelMapper>
+        GetBitDepthProfileLevelMapper(std::string mediaType, int32_t bitDepth = 8);
+
         // convert between bitrates
         static bool map(C2Config::bitrate_mode_t, int32_t*);
         static bool map(int32_t, C2Config::bitrate_mode_t*);
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index 4984b8f..a7c2cfe 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,6 +1,9 @@
 {
   "presubmit": [
 
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
     // TODO(b/153661591) enable test once the bug is fixed
     // This tests the extractor path
     // {
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index 0f24771..38f3c24 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -37,15 +37,6 @@
         : AudioStream() {
 }
 
-// Called from AudioTrack.cpp or AudioRecord.cpp
-static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
-    AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
-    streamLegacy->processCallback(event, info);
-}
-
-aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
-    return AudioStreamLegacy_callback;
-}
 
 aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
                                                                         int32_t numFrames) {
@@ -73,84 +64,77 @@
     return (int32_t) callDataCallbackFrames(buffer, numFrames);
 }
 
-void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
-    aaudio_data_callback_result_t callbackResult;
+
+void AudioStreamLegacy::onNewIAudioTrack() {
+    ALOGD("%s stream disconnected", __func__);
+    forceDisconnect();
+    mCallbackEnabled.store(false);
+}
+
+size_t AudioStreamLegacy::onMoreData(const android::AudioTrack::Buffer& buffer) {
     // This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
     // This takes advantage of them killing the stream when they see a size out of range.
     // That is an undocumented behavior.
     // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
+    aaudio_data_callback_result_t callbackResult;
+    (void) checkForDisconnectRequest(true);
 
-    switch (opcode) {
-        case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
-            (void) checkForDisconnectRequest(true);
-
-            // Note that this code assumes an AudioTrack::Buffer is the same as
-            // AudioRecord::Buffer
-            // TODO define our own AudioBuffer and pass it from the subclasses.
-            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
-            if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
-                ALOGW("processCallbackCommon() data, stream disconnected");
-                // This will kill the stream and prevent it from being restarted.
-                // That is OK because the stream is disconnected.
-                audioBuffer->size = SIZE_STOP_CALLBACKS;
-            } else if (!mCallbackEnabled.load()) {
-                ALOGW("processCallbackCommon() no data because callback disabled, set size=0");
-                // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
-                // prevent it from being restarted. This can occur because of a race condition
-                // caused by Legacy callbacks running after the track is "stopped".
-                audioBuffer->size = 0;
-            } else {
-                if (audioBuffer->frameCount == 0) {
-                    ALOGW("processCallbackCommon() data, frameCount is zero");
-                    return;
-                }
-
-                // If the caller specified an exact size then use a block size adapter.
-                if (mBlockAdapter != nullptr) {
-                    int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                    callbackResult = mBlockAdapter->processVariableBlock(
-                            (uint8_t *) audioBuffer->raw, byteCount);
-                } else {
-                    // Call using the AAudio callback interface.
-                    callbackResult = callDataCallbackFrames((uint8_t *)audioBuffer->raw,
-                                                            audioBuffer->frameCount);
-                }
-                if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
-                    audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                } else {
-                    if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
-                        ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
-                    } else {
-                        ALOGW("%s() callback returned invalid result = %d",
-                              __func__, callbackResult);
-                    }
-                    audioBuffer->size = 0;
-                    systemStopInternal();
-                    // Disable the callback just in case the system keeps trying to call us.
-                    mCallbackEnabled.store(false);
-                }
-
-                if (updateStateMachine() != AAUDIO_OK) {
-                    forceDisconnect();
-                    mCallbackEnabled.store(false);
-                }
-            }
+    // Note that this code assumes an AudioTrack::Buffer is the same as
+    // AudioRecord::Buffer
+    // TODO define our own AudioBuffer and pass it from the subclasses.
+    size_t written = buffer.size;
+    if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+        ALOGW("%s() data, stream disconnected", __func__);
+        // This will kill the stream and prevent it from being restarted.
+        // That is OK because the stream is disconnected.
+        written = SIZE_STOP_CALLBACKS;
+    } else if (!mCallbackEnabled.load()) {
+        ALOGW("%s() no data because callback disabled, set size=0", __func__);
+        // Do NOT use SIZE_STOP_CALLBACKS here because that will kill the stream and
+        // prevent it from being restarted. This can occur because of a race condition
+        // caused by Legacy callbacks running after the track is "stopped".
+        written = 0;
+    } else {
+        if (buffer.frameCount == 0) {
+            ALOGW("%s() data, frameCount is zero", __func__);
+            return written;
         }
-            break;
 
-        // Stream got rerouted so we disconnect.
-        case AAUDIO_CALLBACK_OPERATION_DISCONNECTED:
-            ALOGD("processCallbackCommon() stream disconnected");
+        // If the caller specified an exact size then use a block size adapter.
+        if (mBlockAdapter != nullptr) {
+            int32_t byteCount = buffer.frameCount * getBytesPerDeviceFrame();
+            callbackResult = mBlockAdapter->processVariableBlock(
+                    static_cast<uint8_t*>(buffer.raw), byteCount);
+        } else {
+            // Call using the AAudio callback interface.
+            callbackResult = callDataCallbackFrames(static_cast<uint8_t *>(buffer.raw),
+                                                    buffer.frameCount);
+        }
+        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+            written = buffer.frameCount * getBytesPerDeviceFrame();
+        } else {
+            if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+                ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            } else {
+                ALOGW("%s() callback returned invalid result = %d",
+                      __func__, callbackResult);
+            }
+            written = 0;
+            systemStopInternal();
+            // Disable the callback just in case the system keeps trying to call us.
+            mCallbackEnabled.store(false);
+        }
+
+        if (updateStateMachine() != AAUDIO_OK) {
             forceDisconnect();
             mCallbackEnabled.store(false);
-            break;
-
-        default:
-            break;
+        }
     }
+    return written;
 }
 
+
 aaudio_result_t AudioStreamLegacy::checkForDisconnectRequest(bool errorCallbackEnabled) {
     if (mRequestDisconnect.isRequested()) {
         ALOGD("checkForDisconnectRequest() mRequestDisconnect acknowledged");
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index d9ba990..c54d7e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -18,6 +18,7 @@
 #define LEGACY_AUDIO_STREAM_LEGACY_H
 
 #include <media/AudioTimestamp.h>
+#include <media/AudioTrack.h>
 #include <media/AudioSystem.h>
 
 #include <aaudio/AAudio.h>
@@ -30,8 +31,6 @@
 namespace aaudio {
 
 
-typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
-
 enum {
     /**
      * Request that the callback function should fill the data buffer of an output stream,
@@ -56,21 +55,17 @@
 typedef int32_t aaudio_callback_operation_t;
 
 
-class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+class AudioStreamLegacy : public AudioStream,
+                          public FixedBlockProcessor,
+                          protected android::AudioTrack::IAudioTrackCallback {
 public:
     AudioStreamLegacy();
 
     virtual ~AudioStreamLegacy() = default;
 
-    aaudio_legacy_callback_t getLegacyCallback();
 
     int32_t callDataCallbackFrames(uint8_t *buffer, int32_t numFrames);
 
-    // This is public so it can be called from the C callback function.
-    // This is called from the AudioTrack/AudioRecord client.
-    virtual void processCallback(int event, void *info) = 0;
-
-    void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
 
     // Implement FixedBlockProcessor
     int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
@@ -86,7 +81,8 @@
     }
 
 protected:
-
+    size_t onMoreData(const android::AudioTrack::Buffer& buffer) override;
+    void onNewIAudioTrack() override;
     aaudio_result_t getBestTimestamp(clockid_t clockId,
                                      int64_t *framePosition,
                                      int64_t *timeNanoseconds,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 12771cc..df7d4cf 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -37,6 +37,10 @@
 using namespace android;
 using namespace aaudio;
 
+static void sCallbackWrapper(int event, void* userData, void* info) {
+    static_cast<AudioStreamRecord*>(userData)->processCallback(event, info);
+}
+
 AudioStreamRecord::AudioStreamRecord()
     : AudioStreamLegacy()
     , mFixedBlockWriter(*this)
@@ -124,12 +128,12 @@
     uint32_t notificationFrames = 0;
 
     // Setup the callback if there is one.
-    AudioRecord::callback_t callback = nullptr;
+    AudioRecord::legacy_callback_t callback = nullptr;
     void *callbackData = nullptr;
     AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
+        callback = sCallbackWrapper;
         callbackData = this;
     }
     mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -353,14 +357,15 @@
 void AudioStreamRecord::processCallback(int event, void *info) {
     switch (event) {
         case AudioRecord::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+        {
+            AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+            audioBuffer->size = onMoreData(*audioBuffer);
             break;
-
+        }
             // Stream got rerouted so we disconnect.
         case AudioRecord::EVENT_NEW_IAUDIORECORD:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+            onNewIAudioTrack();
             break;
-
         default:
             break;
     }
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 692651d..5ce73f9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,7 +65,9 @@
     }
 
     // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
+    void processCallback(int event, void *info);
+
+    void processCallbackRecord(aaudio_callback_operation_t opcode, void *info);
 
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesRead(frames);
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index fe84ec5..17a6d0c 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -103,14 +103,12 @@
             : getFormat();
 
     // Setup the callback if there is one.
-    AudioTrack::legacy_callback_t callback = nullptr;
-    void *callbackData = nullptr;
+    wp<AudioTrack::IAudioTrackCallback> callback;
     // Note that TRANSFER_SYNC does not allow FAST track
     AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
     if (builder.getDataCallbackProc() != nullptr) {
         streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
-        callback = getLegacyCallback();
-        callbackData = this;
+        callback = wp<AudioTrack::IAudioTrackCallback>::fromExisting(this);
 
         // If the total buffer size is unspecified then base the size on the burst size.
         if (frameCount == 0
@@ -157,7 +155,6 @@
             frameCount,
             flags,
             callback,
-            callbackData,
             notificationFrames,
             nullptr,       // DEFAULT sharedBuffer*/,
             false,   // DEFAULT threadCanCallJava
@@ -293,31 +290,19 @@
     AudioStream::close_l();
 }
 
-void AudioStreamTrack::processCallback(int event, void *info) {
 
-    switch (event) {
-        case AudioTrack::EVENT_MORE_DATA:
-            processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
-            break;
-
-            // Stream got rerouted so we disconnect.
-        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
-            // request stream disconnect if the restored AudioTrack has properties not matching
-            // what was requested initially
-            if (mAudioTrack->channelCount() != getSamplesPerFrame()
-                    || mAudioTrack->format() != getFormat()
-                    || mAudioTrack->getSampleRate() != getSampleRate()
-                    || mAudioTrack->getRoutedDeviceId() != getDeviceId()
-                    || getBufferCapacityFromDevice() != getBufferCapacity()
-                    || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
-                processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
-            }
-            break;
-
-        default:
-            break;
+void AudioStreamTrack::onNewIAudioTrack() {
+    // Stream got rerouted so we disconnect.
+    // request stream disconnect if the restored AudioTrack has properties not matching
+    // what was requested initially
+    if (mAudioTrack->channelCount() != getSamplesPerFrame()
+          || mAudioTrack->format() != getFormat()
+          || mAudioTrack->getSampleRate() != getSampleRate()
+          || mAudioTrack->getRoutedDeviceId() != getDeviceId()
+          || getBufferCapacityFromDevice() != getBufferCapacity()
+          || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
+        AudioStreamLegacy::onNewIAudioTrack();
     }
-    return;
 }
 
 aaudio_result_t AudioStreamTrack::requestStart_l() {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index f604871..0f4d72b 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -81,9 +81,6 @@
 
     aaudio_result_t updateStateMachine() override;
 
-    // This is public so it can be called from the C callback function.
-    void processCallback(int event, void *info) override;
-
     int64_t incrementClientFrameCounter(int32_t frames) override {
         return incrementFramesWritten(frames);
     }
@@ -100,6 +97,7 @@
 
     int32_t getFramesPerBurstFromDevice() const override;
     int32_t getBufferCapacityFromDevice() const override;
+    void onNewIAudioTrack() override;
 
 private:
 
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 8314320..4c83406 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -3033,6 +3033,10 @@
             return AUDIO_MODE_IN_COMMUNICATION;
         case AudioMode::CALL_SCREEN:
             return AUDIO_MODE_CALL_SCREEN;
+        case AudioMode::SYS_RESERVED_CALL_REDIRECT:
+            return AUDIO_MODE_CALL_REDIRECT;
+        case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
+            return AUDIO_MODE_COMMUNICATION_REDIRECT;
     }
     return unexpected(BAD_VALUE);
 }
@@ -3054,6 +3058,10 @@
             return AudioMode::IN_COMMUNICATION;
         case AUDIO_MODE_CALL_SCREEN:
             return AudioMode::CALL_SCREEN;
+        case AUDIO_MODE_CALL_REDIRECT:
+            return AudioMode::SYS_RESERVED_CALL_REDIRECT;
+        case AUDIO_MODE_COMMUNICATION_REDIRECT:
+            return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
         case AUDIO_MODE_CNT:
             break;
     }
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index d3c5231..ac128e6 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -143,7 +143,7 @@
         audio_channel_mask_t channelMask,
         const AttributionSourceState& client,
         size_t frameCount,
-        callback_t cbf,
+        legacy_callback_t callback,
         void* user,
         uint32_t notificationFrames,
         audio_session_t sessionId,
@@ -163,7 +163,39 @@
 {
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
-    (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback, user,
+            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
+            uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
+            microphoneFieldDimension);
+}
+
+AudioRecord::AudioRecord(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        const AttributionSourceState& client,
+        size_t frameCount,
+        const wp<IAudioRecordCallback>& callback,
+        uint32_t notificationFrames,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension)
+    : mActive(false),
+      mStatus(NO_INIT),
+      mClientAttributionSource(client),
+      mSessionId(AUDIO_SESSION_ALLOCATE),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(SP_DEFAULT),
+      mProxy(nullptr)
+{
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
             uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
             microphoneFieldDimension);
@@ -218,14 +250,44 @@
         AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
     }
 }
+namespace {
+class LegacyCallbackWrapper : public AudioRecord::IAudioRecordCallback {
+    const AudioRecord::legacy_callback_t mCallback;
+    void* const mData;
+
+  public:
+    LegacyCallbackWrapper(AudioRecord::legacy_callback_t callback, void* user)
+        : mCallback(callback), mData(user) {}
+
+    size_t onMoreData(const AudioRecord::Buffer& buffer) override {
+        AudioRecord::Buffer copy = buffer;
+        mCallback(AudioRecord::EVENT_MORE_DATA, mData, &copy);
+        return copy.size;
+    }
+
+    void onOverrun() override { mCallback(AudioRecord::EVENT_OVERRUN, mData, nullptr); }
+
+    void onMarker(uint32_t markerPosition) override {
+        mCallback(AudioRecord::EVENT_MARKER, mData, &markerPosition);
+    }
+
+    void onNewPos(uint32_t newPos) override {
+        mCallback(AudioRecord::EVENT_NEW_POS, mData, &newPos);
+    }
+
+    void onNewIAudioRecord() override {
+        mCallback(AudioRecord::EVENT_NEW_IAUDIORECORD, mData, nullptr);
+    }
+};
+}  // namespace
+
 status_t AudioRecord::set(
         audio_source_t inputSource,
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t frameCount,
-        callback_t cbf,
-        void* user,
+        const wp<IAudioRecordCallback>& callback,
         uint32_t notificationFrames,
         bool threadCanCallJava,
         audio_session_t sessionId,
@@ -241,7 +303,7 @@
 {
     status_t status = NO_ERROR;
     uint32_t channelCount;
-
+    const sp<IAudioRecordCallback> callbackHandle = callback.promote();
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
@@ -274,15 +336,15 @@
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
-        if (cbf == NULL || threadCanCallJava) {
+        if (callbackHandle == nullptr || threadCanCallJava) {
             transferType = TRANSFER_SYNC;
         } else {
             transferType = TRANSFER_CALLBACK;
         }
         break;
     case TRANSFER_CALLBACK:
-        if (cbf == NULL) {
-            ALOGE("%s(): Transfer type TRANSFER_CALLBACK but cbf == NULL", __func__);
+        if (callbackHandle == nullptr) {
+            ALOGE("%s(): Transfer type TRANSFER_CALLBACK but callback == nullptr", __func__);
             status = BAD_VALUE;
             goto exit;
         }
@@ -304,7 +366,7 @@
         goto exit;
     }
 
-    if (pAttributes == NULL) {
+    if (pAttributes == nullptr) {
         mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
         mAttributes.source = inputSource;
         if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
@@ -360,9 +422,9 @@
     ALOGV("%s(): mSessionId %d", __func__, mSessionId);
 
     mOrigFlags = mFlags = flags;
-    mCbf = cbf;
+    mCallback = callbackHandle;
 
-    if (cbf != NULL) {
+    if (mCallback != nullptr) {
         mAudioRecordThread = new AudioRecordThread(*this);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
         // thread begins in paused state, and will not reference us until start()
@@ -385,7 +447,6 @@
         goto exit;
     }
 
-    mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000LL * mFrameCount) / mSampleRate;
     mMarkerPosition = 0;
@@ -407,6 +468,37 @@
     return status;
 }
 
+status_t AudioRecord::set(
+        audio_source_t inputSource,
+        uint32_t sampleRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        size_t frameCount,
+        legacy_callback_t callback,
+        void* user,
+        uint32_t notificationFrames,
+        bool threadCanCallJava,
+        audio_session_t sessionId,
+        transfer_type transferType,
+        audio_input_flags_t flags,
+        uid_t uid,
+        pid_t pid,
+        const audio_attributes_t* pAttributes,
+        audio_port_handle_t selectedDeviceId,
+        audio_microphone_direction_t selectedMicDirection,
+        float microphoneFieldDimension,
+        int32_t maxSharedAudioHistoryMs)
+{
+    if (callback != nullptr) {
+        mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
+    } else if (user) {
+        LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
+    }
+    return set(inputSource, sampleRate, format, channelMask, frameCount, mLegacyCallbackWrapper,
+        notificationFrames, threadCanCallJava, sessionId, transferType, flags, uid, pid,
+        pAttributes, selectedDeviceId, selectedMicDirection, microphoneFieldDimension,
+        maxSharedAudioHistoryMs);
+}
 // -------------------------------------------------------------------------
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
@@ -536,12 +628,12 @@
 
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mMarkerPosition = marker;
     mMarkerReached = false;
 
@@ -566,12 +658,12 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
+    AutoMutex lock(mLock);
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL) {
+    if (mCallback.promote() == nullptr) {
         return INVALID_OPERATION;
     }
 
-    AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
@@ -757,7 +849,7 @@
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     IAudioFlinger::CreateRecordInput input;
     IAudioFlinger::CreateRecordOutput output;
-    audio_session_t originalSessionId;
+    [[maybe_unused]] audio_session_t originalSessionId;
     void *iMemPointer;
     audio_track_cblk_t* cblk;
     status_t status;
@@ -926,7 +1018,7 @@
                 mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
     }
     mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
-    if (mServerConfig.format != mFormat && mCbf != nullptr) {
+    if (mServerConfig.format != mFormat && mCallback.promote() != nullptr) {
         mFormatConversionBufRaw = std::make_unique<uint8_t[]>(mNotificationFramesAct * mFrameSize);
         mFormatConversionBuffer.raw = mFormatConversionBufRaw.get();
     }
@@ -1182,6 +1274,11 @@
 nsecs_t AudioRecord::processAudioBuffer()
 {
     mLock.lock();
+    const sp<IAudioRecordCallback> callback = mCallback.promote();
+    if (!callback) {
+        mCallback = nullptr;
+        mLock.unlock();
+        return NS_NEVER;
+    }
     if (mAwaitBoost) {
         mAwaitBoost = false;
         mLock.unlock();
@@ -1257,26 +1354,26 @@
     uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
-    //      mTransfer, mCbf, mUserData, mSampleRate, mFrameSize
+    //      mTransfer, mCallback, mSampleRate, mFrameSize
 
     mLock.unlock();
 
     // perform callbacks while unlocked
     if (newOverrun) {
-        mCbf(EVENT_OVERRUN, mUserData, NULL);
+        callback->onOverrun();
     }
     if (markerReached) {
-        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+        callback->onMarker(markerPosition.value());
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
-        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        callback->onNewPos(newPosition.value());
         newPosition += updatePeriod;
         newPosCount--;
     }
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
-        mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
+        callback->onNewIAudioRecord();
     }
 
     // if inactive, then don't run me again until re-started
@@ -1370,9 +1467,9 @@
                                    mServerConfig.format, audioBuffer.size / mServerSampleSize);
         }
 
-        size_t reqSize = buffer->size;
-        mCbf(EVENT_MORE_DATA, mUserData, buffer);
-        size_t readSize = buffer->size;
+        const size_t reqSize = buffer->size;
+        const size_t readSize = callback->onMoreData(*buffer);
+        buffer->size = readSize;
 
         // Validate on returned size
         if (ssize_t(readSize) < 0 || readSize > reqSize) {
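
For reference, processAudioBuffer() above now promotes the weak callback reference to a strong one for the
duration of each delivery, so a client that has already been destroyed is never called back. A minimal
standalone sketch of that promote-or-bail pattern, with hypothetical Listener/Worker types:

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    struct Listener : public virtual android::RefBase {
        virtual void onEvent() {}
    };

    struct Worker {
        android::wp<Listener> mListener;  // weak: does not extend the listener's lifetime

        bool deliver() {
            const android::sp<Listener> listener = mListener.promote();
            if (listener == nullptr) {
                mListener = nullptr;      // listener is gone; stop delivering
                return false;
            }
            listener->onEvent();          // safe: a strong reference is held for this call
            return true;
        }
    };
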
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 9617556..d9fddf3 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -25,6 +25,7 @@
 
 #include <android/media/IAudioPolicyService.h>
 #include <android-base/macros.h>
+#include <android-base/stringprintf.h>
 #include <audio_utils/clock.h>
 #include <audio_utils/primitives.h>
 #include <binder/IPCThreadState.h>
@@ -44,6 +45,7 @@
 static const int kMaxLoopCountNotifications = 32;
 
 using ::android::aidl_utils::statusTFromBinderStatus;
+using ::android::base::StringPrintf;
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -539,6 +541,7 @@
     uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
     pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
     sp<IAudioTrackCallback> _callback = callback.promote();
+    std::string errorMessage;
     // Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
     ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
@@ -563,32 +566,34 @@
     case TRANSFER_CALLBACK:
     case TRANSFER_SYNC_NOTIF_CALLBACK:
         if (_callback == nullptr || sharedBuffer != 0) {
-            ALOGE("%s(): Transfer type %s but callback == nullptr || sharedBuffer != 0",
+            errorMessage = StringPrintf(
+                    "%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
                     __func__, convertTransferToText(transferType));
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         break;
     case TRANSFER_OBTAIN:
     case TRANSFER_SYNC:
         if (sharedBuffer != 0) {
-            ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         break;
     case TRANSFER_SHARED:
         if (sharedBuffer == 0) {
-            ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
+            errorMessage = StringPrintf(
+                    "%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         break;
     default:
-        ALOGE("%s(): Invalid transfer type %d",
-                __func__, transferType);
+        errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, transferType);
         status = BAD_VALUE;
-        goto exit;
+        goto error;
     }
     mSharedBuffer = sharedBuffer;
     mTransfer = transferType;
@@ -602,9 +607,9 @@
 
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
-        ALOGE("%s(): Track already in use", __func__);
+        errorMessage = StringPrintf("%s: Track already in use", __func__);
         status = INVALID_OPERATION;
-        goto exit;
+        goto error;
     }
 
     // handle default values first.
@@ -613,9 +618,9 @@
     }
     if (pAttributes == NULL) {
         if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
-            ALOGE("%s(): Invalid stream type %d", __func__, streamType);
+            errorMessage = StringPrintf("%s: Invalid stream type %d", __func__, streamType);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         mOriginalStreamType = streamType;
 
@@ -639,16 +644,16 @@
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("%s(): Invalid format %#x", __func__, format);
+        errorMessage = StringPrintf("%s: Invalid format %#x", __func__, format);
         status = BAD_VALUE;
-        goto exit;
+        goto error;
     }
     mFormat = format;
 
     if (!audio_is_output_channel(channelMask)) {
-        ALOGE("%s(): Invalid channel mask %#x",  __func__, channelMask);
+        errorMessage = StringPrintf("%s: Invalid channel mask %#x",  __func__, channelMask);
         status = BAD_VALUE;
-        goto exit;
+        goto error;
     }
     mChannelMask = channelMask;
     channelCount = audio_channel_count_from_out_mask(channelMask);
@@ -687,8 +692,10 @@
 
     // sampling rate must be specified for direct outputs
     if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+        errorMessage = StringPrintf(
+                "%s: sample rate must be specified for direct outputs", __func__);
         status = BAD_VALUE;
-        goto exit;
+        goto error;
     }
     mSampleRate = sampleRate;
     mOriginalSampleRate = sampleRate;
@@ -718,16 +725,17 @@
         mNotificationsPerBufferReq = 0;
     } else {
         if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
-            ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
+            errorMessage = StringPrintf(
+                    "%s: notificationFrames=%d not permitted for non-fast track",
                     __func__, notificationFrames);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         if (frameCount > 0) {
             ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
                     __func__, notificationFrames, frameCount);
             status = BAD_VALUE;
-            goto exit;
+            goto error;
         }
         mNotificationFramesReq = 0;
         const uint32_t minNotificationsPerBuffer = 1;
@@ -772,6 +780,7 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // Do not goto error here, to avoid double-logging the error.
         goto exit;
     }
 
@@ -805,6 +814,12 @@
     mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
     mVolumeHandler = new media::VolumeHandler();
 
+error:
+    if (status != NO_ERROR) {
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+    }
+    // fall through
 exit:
     mStatus = status;
     return status;
@@ -1832,12 +1847,13 @@
 {
     status_t status;
     bool callbackAdded = false;
+    std::string errorMessage;
 
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
-        ALOGE("%s(%d): Could not get audioflinger",
+        errorMessage = StringPrintf("%s(%d): Could not get audioflinger",
                 __func__, mPortId);
-        status = NO_INIT;
+        status = DEAD_OBJECT;
         goto exit;
     }
 
@@ -1914,10 +1930,11 @@
     }
 
     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
+        errorMessage = StringPrintf(
+                "%s(%d): AudioFlinger could not create track, status: %d output %d",
                 __func__, mPortId, status, output.outputId);
         if (status == NO_ERROR) {
-            status = NO_INIT;
+            status = INVALID_OPERATION; // device not ready
         }
         goto exit;
     }
@@ -1948,8 +1965,8 @@
     output.audioTrack->getCblk(&sfr);
     sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
     if (iMem == 0) {
-        ALOGE("%s(%d): Could not get control block", __func__, mPortId);
-        status = NO_INIT;
+        errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
+        status = FAILED_TRANSACTION;
         goto exit;
     }
     // TODO: Using unsecurePointer() has some associated security pitfalls
@@ -1958,8 +1975,9 @@
     //       issue (e.g. by copying).
     void *iMemPointer = iMem->unsecurePointer();
     if (iMemPointer == NULL) {
-        ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
-        status = NO_INIT;
+        errorMessage = StringPrintf(
+                "%s(%d): Could not get control block pointer", __func__, mPortId);
+        status = FAILED_TRANSACTION;
         goto exit;
     }
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
@@ -2017,8 +2035,10 @@
         //       issue (e.g. by copying).
         buffers = mSharedBuffer->unsecurePointer();
         if (buffers == NULL) {
-            ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
-            status = NO_INIT;
+            errorMessage = StringPrintf(
+                    "%s(%d): Could not get buffer pointer", __func__, mPortId);
+            ALOGE("%s", errorMessage.c_str());
+            status = FAILED_TRANSACTION;
             goto exit;
         }
     }
@@ -2116,17 +2136,44 @@
     }
 
 exit:
-    if (status != NO_ERROR && callbackAdded) {
-        // note: mOutput is always valid is callbackAdded is true
-        AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+    if (status != NO_ERROR) {
+        if (callbackAdded) {
+            // note: mOutput is always valid if callbackAdded is true
+            AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+        }
+        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+        reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
     }
-
     mStatus = status;
 
     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
 }
 
+void AudioTrack::reportError(status_t status, const char *event, const char *message) const
+{
+    if (status == NO_ERROR) return;
+    // We report error on the native side because some callers do not come
+    // from Java.
+    mediametrics::LogItem(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + "error")
+        .set(AMEDIAMETRICS_PROP_EVENT, event)
+        .set(AMEDIAMETRICS_PROP_ERROR, mediametrics::statusToErrorString(status))
+        .set(AMEDIAMETRICS_PROP_ERRORMESSAGE, message)
+        .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+        .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+        .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
+        .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
+        .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+        .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+        .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mReqFrameCount) // requested frame count
+        // the following are NOT immutable
+        .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+        .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
+        .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
+        .record();
+}
+
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
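
AudioTrack::set() and createTrack_l() above now accumulate a single errorMessage and log/report it once
from the error and exit paths instead of logging at every failing check. A generic sketch of that
collect-then-report shape, assuming a reportError-style metrics hook is available (configureThing and its
parameter are made up):

    #define LOG_TAG "DeferredErrorExample"
    #include <string>
    #include <android-base/stringprintf.h>
    #include <utils/Errors.h>
    #include <utils/Log.h>

    static android::status_t configureThing(int value) {
        using android::base::StringPrintf;
        android::status_t status = android::NO_ERROR;
        std::string errorMessage;

        if (value < 0) {
            errorMessage = StringPrintf("%s: invalid value %d", __func__, value);
            status = android::BAD_VALUE;
            goto error;
        }
        // ... further checks, each filling errorMessage and jumping to error ...
        return android::NO_ERROR;

    error:
        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
        // A metrics hook such as reportError(status, event, errorMessage.c_str())
        // would also run here, as AudioTrack::set() does above.
        return status;
    }
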
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index 80fe1ba..4c89249 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -58,7 +58,8 @@
 
 constexpr audio_mode_t kModes[] = {
     AUDIO_MODE_INVALID, AUDIO_MODE_CURRENT,          AUDIO_MODE_NORMAL,     AUDIO_MODE_RINGTONE,
-    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN};
+    AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN,
+    AUDIO_MODE_CALL_REDIRECT, AUDIO_MODE_COMMUNICATION_REDIRECT};
 
 constexpr audio_session_t kSessionId[] = {AUDIO_SESSION_NONE, AUDIO_SESSION_OUTPUT_STAGE,
                                           AUDIO_SESSION_DEVICE};
@@ -231,7 +232,7 @@
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
     attributionSource.token = sp<BBinder>::make();
     track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
-               nullptr, notificationFrames, sharedBuffer, false, sessionId,
+               notificationFrames, sharedBuffer, false, sessionId,
                ((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
                                                         : AudioTrack::TRANSFER_DEFAULT,
                offload ? &offloadInfo : nullptr, attributionSource, &attributes, false, 1.0f,
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index dd4d2da..ee262f3 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -40,7 +40,7 @@
 
 // ----------------------------------------------------------------------------
 
-class AudioEffect : public RefBase
+class AudioEffect : public virtual RefBase
 {
 public:
 
diff --git a/media/libaudioclient/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
index ef729ed..405ec7d 100644
--- a/media/libaudioclient/include/media/AudioIoDescriptor.h
+++ b/media/libaudioclient/include/media/AudioIoDescriptor.h
@@ -39,7 +39,7 @@
 
 // audio input/output descriptor used to cache output configurations in client process to avoid
 // frequent calls through IAudioFlinger
-class AudioIoDescriptor : public RefBase {
+class AudioIoDescriptor : public virtual RefBase {
 public:
     AudioIoDescriptor() = default;
     // For AUDIO_{INPUT|OUTPUT}_CLOSED events.
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index a32c595..f6faaae 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -47,7 +47,7 @@
 {
 public:
 
-    /* Events used by AudioRecord callback function (callback_t).
+    /* Events used by AudioRecord callback function (legacy_callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
      */
     enum event_type {
@@ -65,7 +65,7 @@
     };
 
     /* Client should declare a Buffer and pass address to obtainBuffer()
-     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
+     * and releaseBuffer().  See also legacy_callback_t for EVENT_MORE_DATA.
      */
 
     class Buffer
@@ -117,7 +117,28 @@
      *          - EVENT_NEW_IAUDIORECORD: unused.
      */
 
-    typedef void (*callback_t)(int event, void* user, void *info);
+    typedef void (*legacy_callback_t)(int event, void* user, void *info);
+
+    class IAudioRecordCallback : public virtual RefBase {
+        friend AudioRecord;
+     protected:
+        // Request for client to read newly available data.
+        // Used for TRANSFER_CALLBACK mode.
+        // Parameters:
+        //  - buffer: Buffer to read from
+        // Returns:
+        //  - Number of bytes actually consumed.
+        virtual size_t onMoreData([[maybe_unused]] const AudioRecord::Buffer& buffer) { return 0; }
+        // A buffer overrun occurred.
+        virtual void onOverrun() {}
+        // Record head is at the specified marker (see setMarkerPosition()).
+        virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
+        // Record head is at a new position (see setPositionUpdatePeriod()).
+        virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+        // IAudioRecord was recreated due to re-routing, server invalidation or
+        // server crash.
+        virtual void onNewIAudioRecord() {}
+    };
 
     /* Returns the minimum frame count required for the successful creation of
      * an AudioRecord object.
@@ -182,20 +203,37 @@
      * pAttributes:        If not NULL, supersedes inputSource for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
-
                         AudioRecord(audio_source_t inputSource,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
                                     const android::content::AttributionSourceState& client,
                                     size_t frameCount = 0,
-                                    callback_t cbf = NULL,
-                                    void* user = NULL,
+                                    const wp<IAudioRecordCallback> &callback = nullptr,
                                     uint32_t notificationFrames = 0,
                                     audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
-                                    const audio_attributes_t* pAttributes = NULL,
+                                    const audio_attributes_t* pAttributes = nullptr,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                    audio_microphone_direction_t
+                                        selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                                    float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
+
+                        AudioRecord(audio_source_t inputSource,
+                                    uint32_t sampleRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    const android::content::AttributionSourceState& client,
+                                    size_t frameCount,
+                                    legacy_callback_t callback,
+                                    void* user,
+                                    uint32_t notificationFrames = 0,
+                                    audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                                    transfer_type transferType = TRANSFER_DEFAULT,
+                                    audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                                    const audio_attributes_t* pAttributes = nullptr,
                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                     audio_microphone_direction_t
                                         selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -223,13 +261,12 @@
      *
      * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
-            status_t    set(audio_source_t inputSource,
+            status_t    set(audio_source_t inputSource,
                             uint32_t sampleRate,
                             audio_format_t format,
                             audio_channel_mask_t channelMask,
                             size_t frameCount = 0,
-                            callback_t cbf = NULL,
-                            void* user = NULL,
+                            const wp<IAudioRecordCallback> &callback = nullptr,
                             uint32_t notificationFrames = 0,
                             bool threadCanCallJava = false,
                             audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
@@ -237,7 +274,28 @@
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
                             uid_t uid = AUDIO_UID_INVALID,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL,
+                            const audio_attributes_t* pAttributes = nullptr,
+                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                            audio_microphone_direction_t
+                                selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+                            float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
+                            int32_t maxSharedAudioHistoryMs = 0);
+
+            status_t    set(audio_source_t inputSource,
+                            uint32_t sampleRate,
+                            audio_format_t format,
+                            audio_channel_mask_t channelMask,
+                            size_t frameCount,
+                            legacy_callback_t callback,
+                            void* user,
+                            uint32_t notificationFrames = 0,
+                            bool threadCanCallJava = false,
+                            audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+                            transfer_type transferType = TRANSFER_DEFAULT,
+                            audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                            uid_t uid = AUDIO_UID_INVALID,
+                            pid_t pid = -1,
+                            const audio_attributes_t* pAttributes = nullptr,
                             audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                             audio_microphone_direction_t
                                 selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
@@ -673,8 +731,9 @@
     bool                    mActive;
 
     // for client callback handler
-    callback_t              mCbf;                   // callback handler for events, or NULL
-    void*                   mUserData;
+    wp<IAudioRecordCallback> mCallback;
+    sp<IAudioRecordCallback> mLegacyCallbackWrapper;
 
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
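
A capture client written against the new interface might look like the sketch below; MyRecordCallback and
its vector sink are illustrative only. Because AudioRecord keeps just a wp<> to the callback, the client
must hold its own sp<> for as long as it expects callbacks.

    #include <cstdint>
    #include <vector>
    #include <media/AudioRecord.h>

    class MyRecordCallback : public android::AudioRecord::IAudioRecordCallback {
      public:
        explicit MyRecordCallback(std::vector<uint8_t>* sink) : mSink(sink) {}

      protected:
        // TRANSFER_CALLBACK: consume the newly captured data, return bytes consumed.
        size_t onMoreData(const android::AudioRecord::Buffer& buffer) override {
            const uint8_t* data = static_cast<const uint8_t*>(buffer.raw);
            mSink->insert(mSink->end(), data, data + buffer.size);
            return buffer.size;
        }
        void onOverrun() override { /* input overflowed; some data was dropped */ }
        void onNewIAudioRecord() override { /* server-side record was re-created */ }

      private:
        std::vector<uint8_t>* const mSink;
    };
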
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 193e7f7..0e9d48c 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -538,7 +538,7 @@
 
 
     // A listener for capture state changes.
-    class CaptureStateListener : public RefBase {
+    class CaptureStateListener : public virtual RefBase {
     public:
         // Called whenever capture state changes.
         virtual void onStateChanged(bool active) = 0;
@@ -563,7 +563,7 @@
 
     // ----------------------------------------------------------------------------
 
-    class AudioVolumeGroupCallback : public RefBase
+    class AudioVolumeGroupCallback : public virtual RefBase
     {
     public:
 
@@ -578,7 +578,7 @@
     static status_t addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
     static status_t removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
 
-    class AudioPortCallback : public RefBase
+    class AudioPortCallback : public virtual RefBase
     {
     public:
 
@@ -594,7 +594,7 @@
     static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
     static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
 
-    class AudioDeviceCallback : public RefBase
+    class AudioDeviceCallback : public virtual RefBase
     {
     public:
 
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 1bc2c32..16e10b5 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -150,46 +150,74 @@
     class IAudioTrackCallback : public virtual RefBase {
       friend AudioTrack;
       protected:
-        // Request to write more data to buffer.
-        // This event only occurs for TRANSFER_CALLBACK.
-        // If this event is delivered but the callback handler does not want to write more data,
-        // the handler must ignore the event by returning zero.
-        // This might occur, for example, if the application is waiting for source data or is at
-        // the end of stream.
-        // For data filling, it is preferred that the callback does not block and instead returns
-        // a short count of the amount of data actually delivered.
-        // buffer: Buffer to fill
-        virtual size_t onMoreData(const AudioTrack::Buffer& buffer) { return buffer.size; }
+       /* Request to write more data to buffer.
+        * This event only occurs for TRANSFER_CALLBACK.
+        * If this event is delivered but the callback handler does not want to write more data,
+        * the handler must ignore the event by returning zero.
+        * This might occur, for example, if the application is waiting for source data or is at
+        * the end of stream.
+        * For data filling, it is preferred that the callback does not block and instead returns
+        * a short count of the amount of data actually delivered.
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) { return 0; }
+
         // Buffer underrun occurred. This will not occur for static tracks.
         virtual void onUnderrun() {}
-        // Sample loop end was reached; playback restarted from loop start if loop count was not 0
-        // for a static track.
-        // loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+
+       /* Sample loop end was reached; playback restarted from loop start if loop count was not 0
+        * for a static track.
+        * Parameters:
+        *  - loopsRemaining: Number of loops remaining to be played. -1 if infinite looping.
+        */
         virtual void onLoopEnd([[maybe_unused]] int32_t loopsRemaining) {}
-        // Playback head is at the specified marker (See setMarkerPosition()).
-        // onMarker: Marker position in frames
+
+       /* Playback head is at the specified marker (See setMarkerPosition()).
+        * Parameters:
+        *  - markerPosition: Marker position in frames
+        */
         virtual void onMarker([[maybe_unused]] uint32_t markerPosition) {}
-        // Playback head is at a new position (See setPositionUpdatePeriod()).
-        // newPos: New position in frames
+
+       /* Playback head is at a new position (See setPositionUpdatePeriod()).
+        * Parameters:
+        *  - newPos: New position in frames
+        */
         virtual void onNewPos([[maybe_unused]] uint32_t newPos) {}
+
         // Playback has completed for a static track.
         virtual void onBufferEnd() {}
+
         // IAudioTrack was re-created, either due to re-routing and voluntary invalidation
         // by mediaserver, or mediaserver crash.
         virtual void onNewIAudioTrack() {}
+
         // Sent after all the buffers queued in AF and HW are played back (after stop is called)
         // for an offloaded track.
         virtual void onStreamEnd() {}
-        // Delivered periodically and when there's a significant change
-        // in the mapping from frame position to presentation time.
-        // See AudioTimestamp for the information included with event.
-        // TODO not yet implemented.
+
+       /* Delivered periodically and when there's a significant change
+        * in the mapping from frame position to presentation time.
+        * See AudioTimestamp for the information included with event.
+        * TODO not yet implemented.
+        * Parameters:
+        *  - timestamp: New frame position and presentation time mapping.
+        */
         virtual void onNewTimestamp([[maybe_unused]] AudioTimestamp timestamp) {}
-        // Notification that more data can be given by write()
-        // This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
-        // Similar to onMoreData(), return the number of frames actually written
-        // buffer: Buffer to fill
-        virtual size_t onCanWriteMoreData(const AudioTrack::Buffer& buffer) { return buffer.size; }
+
+       /* Notification that more data can be given by write()
+        * This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
+        * Similar to onMoreData(), return the number of frames actually written
+        * Parameters:
+        *  - buffer: Buffer to fill
+        * Returns:
+        * Amount of data actually written in bytes.
+        */
+        virtual size_t onCanWriteMoreData([[maybe_unused]] const AudioTrack::Buffer& buffer) {
+            return 0;
+        }
     };
 
     /* Returns the minimum frame count required for the successful creation of
@@ -1489,6 +1517,9 @@
     std::string mMetricsId;  // GUARDED_BY(mLock), could change in createTrack_l().
     std::string mCallerName; // for example "aaudio"
 
+    // report error to mediametrics.
+    void reportError(status_t status, const char *event, const char *message) const;
+
 private:
     class AudioTrackCallback : public media::BnAudioTrackCallback {
     public:
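
A corresponding playback-side sketch for TRANSFER_CALLBACK, illustrating that onMoreData() should fill what
it can without blocking and return the number of bytes written. SilenceSource is a made-up class, and the
sketch assumes AudioTrack::Buffer still exposes the public raw/size fields used elsewhere in this patch.

    #include <cstring>
    #include <media/AudioTrack.h>

    class SilenceSource : public android::AudioTrack::IAudioTrackCallback {
      protected:
        size_t onMoreData(const android::AudioTrack::Buffer& buffer) override {
            // A real source would copy up to buffer.size bytes of audio and return
            // a short count if less data is available, rather than block here.
            std::memset(buffer.raw, 0, buffer.size);
            return buffer.size;
        }
        void onUnderrun() override { /* the sink ran dry between callbacks */ }
    };
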
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 485648d..b4ee4dc 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -65,7 +65,7 @@
 
 // ----------------------------------------------------------------------------
 
-class IAudioFlinger : public RefBase {
+class IAudioFlinger : public virtual RefBase {
 public:
     static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
 
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index 634b2e1..4513323 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -162,9 +162,16 @@
     }
 }
 
-void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+void AudioPort::dump(std::string *dst, int spaces, const char* extraInfo, bool verbose) const {
     if (!mName.empty()) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+        dst->append(base::StringPrintf("\"%s\"%s", mName.c_str(),
+                        extraInfo != nullptr ? "; " : ""));
+    }
+    if (extraInfo != nullptr) {
+        dst->append(base::StringPrintf("%s", extraInfo));
+    }
+    if (!mName.empty() || extraInfo != nullptr) {
+        dst->append("\n");
     }
     if (verbose) {
         std::string profilesStr;
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 15f2d68..ec10bc9 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -99,18 +99,14 @@
 
 void AudioProfile::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+    dst->append(base::StringPrintf("\"%s\"; ", mName.c_str()));
+    dst->append(base::StringPrintf("%s%s%s%s", mIsDynamicFormat ? "[dynamic format]" : "",
              mIsDynamicChannels ? "[dynamic channels]" : "",
-             mIsDynamicRate ? "[dynamic rates]" : ""));
-    if (mName.length() != 0) {
-        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
-    }
-    std::string formatLiteral;
-    if (FormatConverter::toString(mFormat, formatLiteral)) {
-        dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
-    }
+             mIsDynamicRate ? "[dynamic rates]" : "", isDynamic() ? "; " : ""));
+    dst->append(base::StringPrintf("%s (0x%x)\n", audio_format_to_string(mFormat), mFormat));
+
     if (!mSamplingRates.empty()) {
-        dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+        dst->append(base::StringPrintf("%*ssampling rates: ", spaces, ""));
         for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
             dst->append(base::StringPrintf("%d", *it));
             dst->append(++it == mSamplingRates.end() ? "" : ", ");
@@ -119,7 +115,7 @@
     }
 
     if (!mChannelMasks.empty()) {
-        dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+        dst->append(base::StringPrintf("%*schannel masks: ", spaces, ""));
         for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
             dst->append(base::StringPrintf("0x%04x", *it));
             dst->append(++it == mChannelMasks.end() ? "" : ", ");
@@ -128,7 +124,7 @@
     }
 
     dst->append(base::StringPrintf(
-            "%*s- encapsulation type: %#x\n", spaces, "", mEncapsulationType));
+             "%*s%s\n", spaces, "", audio_encapsulation_type_to_string(mEncapsulationType)));
 }
 
 bool AudioProfile::equals(const sp<AudioProfile>& other) const
@@ -321,11 +317,12 @@
 
 void AudioProfileVector::dump(std::string *dst, int spaces) const
 {
-    dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+    dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
     for (size_t i = 0; i < size(); i++) {
-        dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+        const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+        dst->append(prefix);
         std::string profileStr;
-        at(i)->dump(&profileStr, spaces + 8);
+        at(i)->dump(&profileStr, prefix.size());
         dst->append(profileStr);
     }
 }
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index c8222e7..88ba544 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -113,29 +113,24 @@
 void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
                                 const char* extraInfo, bool verbose) const
 {
-    dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
+    const std::string prefix = base::StringPrintf("%*s %d. ", spaces, "", index + 1);
+    dst->append(prefix);
     if (mId != 0) {
-        dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+        dst->append(base::StringPrintf("Port ID: %d; ", mId));
     }
-
     if (extraInfo != nullptr) {
-        dst->append(extraInfo);
+        dst->append(base::StringPrintf("%s; ", extraInfo));
     }
-
-    dst->append(base::StringPrintf("%*s- type: %-48s\n",
-            spaces, "", ::android::toString(mDeviceTypeAddr.mType).c_str()));
+    dst->append(base::StringPrintf("%s (%s)\n",
+                    audio_device_to_string(mDeviceTypeAddr.mType),
+                    mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
 
     dst->append(base::StringPrintf(
-            "%*s- supported encapsulation modes: %u\n", spaces, "", mEncapsulationModes));
-    dst->append(base::StringPrintf(
-            "%*s- supported encapsulation metadata types: %u\n",
-            spaces, "", mEncapsulationMetadataTypes));
+                    "%*sEncapsulation modes: %u, metadata types: %u\n",
+                    static_cast<int>(prefix.size()), "",
+                    mEncapsulationModes, mEncapsulationMetadataTypes));
 
-    if (mDeviceTypeAddr.address().size() != 0) {
-        dst->append(base::StringPrintf(
-                "%*s- address: %-32s\n", spaces, "", mDeviceTypeAddr.getAddress()));
-    }
-    AudioPort::dump(dst, spaces, verbose);
+    AudioPort::dump(dst, prefix.size(), nullptr, verbose);
 }
 
 std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
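
The reworked dump() methods above build a numbered prefix once and then indent continuation lines to
prefix.size() using printf-style "%*s" padding. A standalone sketch of that alignment trick (dumpEntry and
its strings are made up):

    #include <string>
    #include <android-base/stringprintf.h>

    static std::string dumpEntry(int index, const std::string& title, const std::string& detail) {
        using android::base::StringPrintf;
        std::string out;
        const std::string prefix = StringPrintf(" %d. ", index);
        out += prefix + title + "\n";
        // "%*s" prints an empty string padded to the requested width, aligning the
        // continuation line under the text that follows the prefix.
        out += StringPrintf("%*s%s\n", static_cast<int>(prefix.size()), "", detail.c_str());
        return out;
    }
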
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 985e05e..d6a098f 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -111,7 +111,8 @@
                         ((mFlags.input & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0)));
     }
 
-    void dump(std::string *dst, int spaces, bool verbose = true) const;
+    void dump(std::string *dst, int spaces,
+              const char* extraInfo = nullptr, bool verbose = true) const;
 
     void log(const char* indent) const;
 
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index 62670e4..d7cddb7 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -69,7 +69,7 @@
     void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
     bool isDynamicFormat() const { return mIsDynamicFormat; }
 
-    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+    bool isDynamic() const { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
 
     audio_encapsulation_type_t getEncapsulationType() const { return mEncapsulationType; }
     void setEncapsulationType(audio_encapsulation_type_t encapsulationType) {
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index 97b5b95..e29364c 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -30,6 +30,8 @@
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
     MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_SCREEN),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_CALL_REDIRECT),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_COMMUNICATION_REDIRECT),
     TERMINATOR
 };
 
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index d597a4d..1c71f5c 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -23,6 +23,7 @@
 
 #include <mutex>
 #include <set>
+#include <unordered_map>
 
 #include <binder/Parcel.h>
 #include <cutils/properties.h>
@@ -51,6 +52,32 @@
 // the service is off.
 #define SVC_TRIES               2
 
+static const std::unordered_map<std::string, int32_t>& getErrorStringMap() {
+    // DO NOT MODIFY VALUES (OK to add new ones).
+    // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
+    static std::unordered_map<std::string, int32_t> map{
+        {"",                                      NO_ERROR},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT, BAD_VALUE},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_IO,       DEAD_OBJECT},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY,   NO_MEMORY},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY, PERMISSION_DENIED},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_STATE,    INVALID_OPERATION},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT,  WOULD_BLOCK},
+        {AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN,  UNKNOWN_ERROR},
+    };
+    return map;
+}
+
+status_t errorStringToStatus(const char *error) {
+    const auto& map = getErrorStringMap();
+    if (error == nullptr || *error == '\0') return NO_ERROR;
+    auto it = map.find(error);
+    if (it != map.end()) {
+        return it->second;
+    }
+    return UNKNOWN_ERROR;
+}
+
 mediametrics::Item* mediametrics::Item::convert(mediametrics_handle_t handle) {
     mediametrics::Item *item = (android::mediametrics::Item *) handle;
     return item;
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index a09a673..5d0eca0 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -115,6 +115,19 @@
 #define AMEDIAMETRICS_PROP_DIRECTION      "direction"      // string AAudio input or output
 #define AMEDIAMETRICS_PROP_DURATIONNS     "durationNs"     // int64 duration time span
 #define AMEDIAMETRICS_PROP_ENCODING       "encoding"       // string value of format
+
+// Error statistics
+#define AMEDIAMETRICS_PROP_ERROR          "error#"         // string, empty or one of
+                                                           // AMEDIAMETRICS_PROP_ERROR_VALUE_*
+                                                           // Used for error categorization.
+#define AMEDIAMETRICS_PROP_ERRORSUBCODE   "errorSubCode#"  // int32, specific code for error
+                                                           // used in conjunction with error#.
+#define AMEDIAMETRICS_PROP_ERRORMESSAGE   "errorMessage#"  // string, supplemental to error.
+                                                           // Arbitrary information treated as
+                                                           // informational, may be logcat msg,
+                                                           // or an exception with stack trace.
+                                                           // Treated as "debug" information.
+
 #define AMEDIAMETRICS_PROP_EVENT          "event#"         // string value (often func name)
 #define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs"  // time to execute the event
 
@@ -215,4 +228,62 @@
 #define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_TONEGENERATOR "tonegenerator"  // dial tones
 #define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN       "unknown"        // callerName not set
 
+// MediaMetrics errors are expected to cover the following sources:
+// https://docs.oracle.com/javase/7/docs/api/java/lang/RuntimeException.html
+// https://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/native/libs/binder/include/binder/Status.h;drc=88e25c0861499ee3ab885814dddc097ab234cb7b;l=57
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/base/media/java/android/media/AudioSystem.java;drc=3ac246c43294d7f7012bdcb0ccb7bae1aa695bd4;l=785
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/av/media/libaaudio/include/aaudio/AAudio.h;drc=cfd3a6fa3aaaf712a890dc02452b38ef401083b8;l=120
+
+// Error category:
+// An empty error string indicates no error.
+
+// Error category: argument
+//   IllegalArgumentException
+//   NullPointerException
+//   BAD_VALUE
+//   Out of range, out of bounds.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT           "argument"
+
+// Error category: io
+//   IOException
+//   android.os.DeadObjectException, android.os.RemoteException
+//   DEAD_OBJECT
+//   FAILED_TRANSACTION
+//   IO_ERROR
+//   file or ioctl failure
+//   Service, rpc, binder, or socket failure.
+//   Hardware or device failure.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_IO                 "io"
+
+// Error category: outOfMemory
+//   OutOfMemoryException
+//   NO_MEMORY
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY             "memory"
+
+// Error category: security
+//   SecurityException
+//   PERMISSION_DENIED
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY           "security"
+
+// Error category: state
+//   IllegalStateException
+//   UnsupportedOperationException
+//   INVALID_OPERATION
+//   NO_INIT
+//   Functionality not implemented (argument may or may not be correct).
+//   Call unexpected or out of order.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_STATE              "state"
+
+// Error category: timeout
+//   TimeoutException
+//   WOULD_BLOCK
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT            "timeout"
+
+// Error category: unknown
+//   Exception (Java specified not listed above, or custom app/service)
+//   UNKNOWN_ERROR
+//   Catch-all bucket for errors not listed above.
+#define AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN            "unknown"
+
 #endif // ANDROID_MEDIA_MEDIAMETRICSCONSTANTS_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index d69f78e..f2cd505 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -105,6 +105,36 @@
 };
 
 /*
+ * Helper for error conversions
+ */
+
+static inline constexpr const char* statusToErrorString(status_t status) {
+    switch (status) {
+    case NO_ERROR:
+        return "";
+    case BAD_VALUE:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT;
+    case DEAD_OBJECT:
+    case FAILED_TRANSACTION:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_IO;
+    case NO_MEMORY:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY;
+    case PERMISSION_DENIED:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY;
+    case NO_INIT:
+    case INVALID_OPERATION:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_STATE;
+    case WOULD_BLOCK:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT;
+    case UNKNOWN_ERROR:
+    default:
+        return AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN;
+    }
+}
+
+status_t errorStringToStatus(const char *error);
+
+/*
  * Time printing
  *
  * kPrintFormatLong time string is 19 characters (including null termination).
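
statusToErrorString() collapses several status codes into one category (for example, NO_INIT and
INVALID_OPERATION both map to "state"), so errorStringToStatus() can only hand back one representative code
per category. A small sketch of that asymmetry, assuming both helpers live in android::mediametrics next to
the declarations above:

    #include <media/MediaMetricsItem.h>

    static void demoErrorCategories() {
        using namespace android;
        // Both NO_INIT and INVALID_OPERATION fall into the "state" category...
        const char* category = mediametrics::statusToErrorString(NO_INIT);
        // ...but the reverse mapping returns the category's representative code,
        // INVALID_OPERATION, not the original NO_INIT.
        const status_t representative = mediametrics::errorStringToStatus(category);
        (void)representative;
    }
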
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 53e3d52..c7a7a3a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2217,8 +2217,7 @@
                     channelMask,
                     frameCount,
                     flags,
-                    NULL, // callback
-                    NULL, // user data
+                    nullptr, // callback
                     0, // notification frames
                     mSessionId,
                     AudioTrack::TRANSFER_DEFAULT,
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index a372b7f..8c86e16 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -231,6 +231,15 @@
         format = NULL;
     }
 
+    // If decoding a thumbnail, check that the decoder supports the thumbnail dimensions instead.
+    int32_t thumbHeight, thumbWidth;
+    if (thumbnail && format != NULL
+            && trackMeta->findInt32(kKeyThumbnailHeight, &thumbHeight)
+            && trackMeta->findInt32(kKeyThumbnailWidth, &thumbWidth)) {
+        format->setInt32("height", thumbHeight);
+        format->setInt32("width", thumbWidth);
+    }
+
     MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 00c38d5..1ea3f99 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1511,6 +1511,9 @@
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, this);
 
+    // TODO: validity check log-session-id: it should be a 32-hex-digit string.
+    format->findString("log-session-id", &mLogSessionId);
+
     if (mMetricsHandle != 0) {
         int32_t profile = 0;
         if (format->findInt32("profile", &profile)) {
@@ -1522,11 +1525,11 @@
         }
         mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
                               (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+
+        mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
     }
 
     if (mIsVideo) {
-        // TODO: validity check log-session-id: it should be a 32-hex-digit.
-        format->findString("log-session-id", &mLogSessionId);
         format->findInt32("width", &mVideoWidth);
         format->findInt32("height", &mVideoHeight);
         if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
@@ -1534,7 +1537,6 @@
         }
 
         if (mMetricsHandle != 0) {
-            mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
             mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
             mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
             mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 0bc5976..0f5e95e 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -67,7 +67,11 @@
         mFd = -1;
     }
 
-    free(mOs);
+    if (mOs != nullptr) {
+        ogg_stream_clear(mOs);
+        free(mOs);
+        mOs = nullptr;
+    }
 }
 
 status_t OggWriter::initCheck() const {
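
ogg_stream_clear() releases the buffers that ogg_stream_init() allocates inside an ogg_stream_state, while
free() alone (as before this fix) releases only the struct itself and leaks those buffers. A minimal sketch
of the paired lifecycle (oggStreamLifecycle is illustrative):

    #include <cstdlib>
    #include <ogg/ogg.h>

    static void oggStreamLifecycle(int serialno) {
        ogg_stream_state* os =
                static_cast<ogg_stream_state*>(calloc(1, sizeof(ogg_stream_state)));
        if (os == nullptr) return;
        if (ogg_stream_init(os, serialno) != 0) {  // allocates the internal buffers
            free(os);
            return;
        }
        // ... submit packets with ogg_stream_packetin(), pull pages with ogg_stream_pageout() ...
        ogg_stream_clear(os);  // frees the internal buffers, but not the struct
        free(os);
    }
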
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4c18f87..a6df5bb 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1759,24 +1759,39 @@
     if (mime.startsWith("video/") || mime.startsWith("image/")) {
         int32_t width;
         int32_t height;
-        if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
-            meta->setInt32(kKeyWidth, width);
-            meta->setInt32(kKeyHeight, height);
-        } else {
+        if (!msg->findInt32("width", &width) || !msg->findInt32("height", &height)) {
             ALOGV("did not find width and/or height");
             return BAD_VALUE;
         }
+        if (width <= 0 || height <= 0) {
+            ALOGE("Invalid value of width: %d and/or height: %d", width, height);
+            return BAD_VALUE;
+        }
+        meta->setInt32(kKeyWidth, width);
+        meta->setInt32(kKeyHeight, height);
 
-        int32_t sarWidth, sarHeight;
-        if (msg->findInt32("sar-width", &sarWidth)
-                && msg->findInt32("sar-height", &sarHeight)) {
+        int32_t sarWidth = -1, sarHeight = -1;
+        bool foundWidth, foundHeight;
+        foundWidth = msg->findInt32("sar-width", &sarWidth);
+        foundHeight = msg->findInt32("sar-height", &sarHeight);
+        if (foundWidth || foundHeight) {
+            if (sarWidth <= 0 || sarHeight <= 0) {
+                ALOGE("Invalid value of sarWidth: %d and/or sarHeight: %d", sarWidth, sarHeight);
+                return BAD_VALUE;
+            }
             meta->setInt32(kKeySARWidth, sarWidth);
             meta->setInt32(kKeySARHeight, sarHeight);
         }
 
-        int32_t displayWidth, displayHeight;
-        if (msg->findInt32("display-width", &displayWidth)
-                && msg->findInt32("display-height", &displayHeight)) {
+        int32_t displayWidth = -1, displayHeight = -1;
+        foundWidth = msg->findInt32("display-width", &displayWidth);
+        foundHeight = msg->findInt32("display-height", &displayHeight);
+        if (foundWidth || foundHeight) {
+            if (displayWidth <= 0 || displayHeight <= 0) {
+                ALOGE("Invalid value of displayWidth: %d and/or displayHeight: %d",
+                        displayWidth, displayHeight);
+                return BAD_VALUE;
+            }
             meta->setInt32(kKeyDisplayWidth, displayWidth);
             meta->setInt32(kKeyDisplayHeight, displayHeight);
         }
@@ -1786,17 +1801,29 @@
             if (msg->findInt32("is-default", &isPrimary) && isPrimary) {
                 meta->setInt32(kKeyTrackIsDefault, 1);
             }
-            int32_t tileWidth, tileHeight, gridRows, gridCols;
-            if (msg->findInt32("tile-width", &tileWidth)) {
+            int32_t tileWidth = -1, tileHeight = -1;
+            foundWidth = msg->findInt32("tile-width", &tileWidth);
+            foundHeight = msg->findInt32("tile-height", &tileHeight);
+            if (foundWidth || foundHeight) {
+                if (tileWidth <= 0 || tileHeight <= 0) {
+                    ALOGE("Invalid value of tileWidth: %d and/or tileHeight: %d",
+                            tileWidth, tileHeight);
+                    return BAD_VALUE;
+                }
                 meta->setInt32(kKeyTileWidth, tileWidth);
-            }
-            if (msg->findInt32("tile-height", &tileHeight)) {
                 meta->setInt32(kKeyTileHeight, tileHeight);
             }
-            if (msg->findInt32("grid-rows", &gridRows)) {
+            int32_t gridRows = -1, gridCols = -1;
+            bool foundRows, foundCols;
+            foundRows = msg->findInt32("grid-rows", &gridRows);
+            foundCols = msg->findInt32("grid-cols", &gridCols);
+            if (foundRows || foundCols) {
+                if (gridRows <= 0 || gridCols <= 0) {
+                    ALOGE("Invalid value of gridRows: %d and/or gridCols: %d",
+                            gridRows, gridCols);
+                    return BAD_VALUE;
+                }
                 meta->setInt32(kKeyGridRows, gridRows);
-            }
-            if (msg->findInt32("grid-cols", &gridCols)) {
                 meta->setInt32(kKeyGridCols, gridCols);
             }
         }
@@ -1812,6 +1839,14 @@
                           &cropTop,
                           &cropRight,
                           &cropBottom)) {
+            if (cropLeft < 0 || cropLeft > cropRight || cropRight >= width) {
+                ALOGE("Invalid value of cropLeft: %d and/or cropRight: %d", cropLeft, cropRight);
+                return BAD_VALUE;
+            }
+            if (cropTop < 0 || cropTop > cropBottom || cropBottom >= height) {
+                ALOGE("Invalid value of cropTop: %d and/or cropBottom: %d", cropTop, cropBottom);
+                return BAD_VALUE;
+            }
             meta->setRect(kKeyCropRect, cropLeft, cropTop, cropRight, cropBottom);
         }
 
@@ -1855,9 +1890,16 @@
             ALOGV("did not find channel-count and/or sample-rate");
             return BAD_VALUE;
         }
+        // The channel count can be zero in some cases, e.g. for MPEG-H.
+        if (sampleRate <= 0 || numChannels < 0) {
+            ALOGE("Invalid value of channel-count: %d and/or sample-rate: %d",
+                   numChannels, sampleRate);
+            return BAD_VALUE;
+        }
         meta->setInt32(kKeyChannelCount, numChannels);
         meta->setInt32(kKeySampleRate, sampleRate);
         int32_t bitsPerSample;
+        // TODO(b/204430952): add an appropriate bounds check for bitsPerSample.
         if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
             meta->setInt32(kKeyBitsPerSample, bitsPerSample);
         }
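
The paired checks above (width/height, SAR, display size, tile size, grid rows/columns) all follow the same rule: both keys must be present with positive values, or neither. A minimal helper capturing that rule could look like the sketch below (hypothetical, not part of this change; the name findValidatedPair and where it would live are assumptions, while sp<AMessage>, findInt32(), ALOGE and the status codes are the existing stagefright APIs used above).

// Hypothetical helper: returns BAD_VALUE if only one of the two keys is set,
// or if either reported value is non-positive; *found tells the caller
// whether the pair was present at all.
static status_t findValidatedPair(const sp<AMessage> &msg,
                                  const char *key1, const char *key2,
                                  int32_t *value1, int32_t *value2, bool *found) {
    *value1 = -1;
    *value2 = -1;
    const bool found1 = msg->findInt32(key1, value1);
    const bool found2 = msg->findInt32(key2, value2);
    *found = found1 || found2;
    if (*found && (*value1 <= 0 || *value2 <= 0)) {
        ALOGE("Invalid value of %s: %d and/or %s: %d", key1, *value1, key2, *value2);
        return BAD_VALUE;
    }
    return OK;
}
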
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 4237e8c..3a01925 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -364,7 +364,7 @@
 inline static const char *asString_AV1Profile(int32_t i, const char *def = "??") {
     switch (i) {
         case AV1ProfileMain8:           return "Main8";
-        case AV1ProfileMain10:          return "Main10HDR";
+        case AV1ProfileMain10:          return "Main10";
         case AV1ProfileMain10HDR10:     return "Main10HDR10";
         case AV1ProfileMain10HDR10Plus: return "Main10HDRPlus";
         default:                        return def;
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 3f4d662..30cdbc9 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -295,6 +295,10 @@
 }
 
 void AAVCAssembler::checkSpsUpdated(const sp<ABuffer> &buffer) {
+    if (buffer->size() == 0) {
+        android_errorWriteLog(0x534e4554, "204077881");
+        return;
+    }
     const uint8_t *data = buffer->data();
     unsigned nalType = data[0] & 0x1f;
     if (nalType == 0x7) {
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 88b822d..c1c7df5 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -34,6 +34,7 @@
         "SchedulingPolicyService.cpp",
         "ServiceUtilities.cpp",
         "TimeCheck.cpp",
+        "TimerThread.cpp",
     ],
     static_libs: [
         "libc_malloc_debug_backtrace",
@@ -118,3 +119,12 @@
 
     export_include_dirs: ["include"],
 }
+
+cc_test {
+    name: "libmediautils_test",
+    srcs: ["TimerThread-test.cpp"],
+    shared_libs: [
+        "libmediautils",
+        "libutils",
+    ],
+}
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 819e146..878ae8c 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -16,9 +16,11 @@
 
 #define LOG_TAG "TimeCheck"
 
-#include <utils/Log.h>
-#include <mediautils/TimeCheck.h>
+#include <optional>
+
 #include <mediautils/EventLog.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
 #include "debuggerd/handler.h"
 
 namespace android {
@@ -58,84 +60,35 @@
 }
 
 /* static */
-sp<TimeCheck::TimeCheckThread> TimeCheck::getTimeCheckThread()
-{
-    static sp<TimeCheck::TimeCheckThread> sTimeCheckThread = new TimeCheck::TimeCheckThread();
+TimerThread* TimeCheck::getTimeCheckThread() {
+    static TimerThread* sTimeCheckThread = new TimerThread();
     return sTimeCheckThread;
 }
 
-TimeCheck::TimeCheck(const char *tag, uint32_t timeoutMs)
-    : mEndTimeNs(getTimeCheckThread()->startMonitoring(tag, timeoutMs))
-{
-}
+TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
+    : mTimerHandle(getTimeCheckThread()->scheduleTask([tag] { crash(tag); },
+                                                      std::chrono::milliseconds(timeoutMs))) {}
 
 TimeCheck::~TimeCheck() {
-    getTimeCheckThread()->stopMonitoring(mEndTimeNs);
+    getTimeCheckThread()->cancelTask(mTimerHandle);
 }
 
-TimeCheck::TimeCheckThread::~TimeCheckThread()
-{
-    AutoMutex _l(mMutex);
-    requestExit();
-    mMonitorRequests.clear();
-    mCond.signal();
-}
-
-nsecs_t TimeCheck::TimeCheckThread::startMonitoring(const char *tag, uint32_t timeoutMs) {
-    Mutex::Autolock _l(mMutex);
-    nsecs_t endTimeNs = systemTime() + milliseconds(timeoutMs);
-    for (; mMonitorRequests.indexOfKey(endTimeNs) >= 0; ++endTimeNs);
-    mMonitorRequests.add(endTimeNs, tag);
-    mCond.signal();
-    return endTimeNs;
-}
-
-void TimeCheck::TimeCheckThread::stopMonitoring(nsecs_t endTimeNs) {
-    Mutex::Autolock _l(mMutex);
-    mMonitorRequests.removeItem(endTimeNs);
-    mCond.signal();
-}
-
-bool TimeCheck::TimeCheckThread::threadLoop()
-{
-    status_t status = TIMED_OUT;
-    {
-        AutoMutex _l(mMutex);
-
-        if (exitPending()) {
-            return false;
+/* static */
+void TimeCheck::crash(const char* tag) {
+    // Generate tombstones for the audio HAL processes and allow time for them
+    // to complete before forcing a restart.
+    std::vector<pid_t> pids = getAudioHalPids();
+    if (pids.size() != 0) {
+        for (const auto& pid : pids) {
+            ALOGI("requesting tombstone for pid: %d", pid);
+            sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
         }
-
-        nsecs_t endTimeNs = INT64_MAX;
-        const char *tag = "<unspecified>";
-        // KeyedVector mMonitorRequests is ordered so take first entry as next timeout
-        if (mMonitorRequests.size() != 0) {
-            endTimeNs = mMonitorRequests.keyAt(0);
-            tag = mMonitorRequests.valueAt(0);
-        }
-
-        const nsecs_t waitTimeNs = endTimeNs - systemTime();
-        if (waitTimeNs > 0) {
-            status = mCond.waitRelative(mMutex, waitTimeNs);
-        }
-        if (status != NO_ERROR) {
-            // Generate audio HAL processes tombstones and allow time to complete
-            // before forcing restart
-            std::vector<pid_t> pids = getAudioHalPids();
-            if (pids.size() != 0) {
-                for (const auto& pid : pids) {
-                    ALOGI("requesting tombstone for pid: %d", pid);
-                    sigqueue(pid, DEBUGGER_SIGNAL, {.sival_int = 0});
-                }
-                sleep(1);
-            } else {
-                ALOGI("No HAL process pid available, skipping tombstones");
-            }
-            LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
-            LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
-        }
+        sleep(1);
+    } else {
+        ALOGI("No HAL process pid available, skipping tombstones");
     }
-    return true;
+    LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+    LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
 }
 
-}; // namespace android
+};  // namespace android
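
For reference, a minimal usage sketch of the refactored TimeCheck (the caller below is hypothetical and the timeout value is illustrative): the scoped object arms a one-shot crash(tag) task on the shared TimerThread and cancels it in its destructor, so the fatal path only runs if the guarded block overruns its timeout.

// Hypothetical caller, not part of this change.
status_t doHalCall() {
    TimeCheck check("doHalCall", 3000 /* timeoutMs */);
    // ... potentially blocking binder/HAL work ...
    return OK;
}  // ~TimeCheck() cancels the pending crash task if we reach here in time
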
diff --git a/media/utils/TimerThread-test.cpp b/media/utils/TimerThread-test.cpp
new file mode 100644
index 0000000..ee8a811
--- /dev/null
+++ b/media/utils/TimerThread-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <chrono>
+#include <thread>
+#include <gtest/gtest.h>
+#include <mediautils/TimerThread.h>
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace {
+
+constexpr auto kJitter = 10ms;
+
+TEST(TimerThread, Basic) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_TRUE(taskRan);
+}
+
+TEST(TimerThread, Cancel) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms - kJitter);
+    ASSERT_FALSE(taskRan);
+    thread.cancelTask(handle);
+    std::this_thread::sleep_for(2 * kJitter);
+    ASSERT_FALSE(taskRan);
+}
+
+TEST(TimerThread, CancelAfterRun) {
+    std::atomic<bool> taskRan = false;
+    TimerThread thread;
+    TimerThread::Handle handle = thread.scheduleTask([&taskRan] { taskRan = true; }, 100ms);
+    std::this_thread::sleep_for(100ms + kJitter);
+    ASSERT_TRUE(taskRan);
+    thread.cancelTask(handle);
+}
+
+TEST(TimerThread, MultipleTasks) {
+    std::array<std::atomic<bool>, 6> taskRan{};  // value-initialize so every flag starts false
+    TimerThread thread;
+
+    auto startTime = std::chrono::steady_clock::now();
+
+    thread.scheduleTask([&taskRan] { taskRan[0] = true; }, 300ms);
+    thread.scheduleTask([&taskRan] { taskRan[1] = true; }, 100ms);
+    thread.scheduleTask([&taskRan] { taskRan[2] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[3] = true; }, 400ms);
+    auto handle4 = thread.scheduleTask([&taskRan] { taskRan[4] = true; }, 200ms);
+    thread.scheduleTask([&taskRan] { taskRan[5] = true; }, 200ms);
+
+    // Task 1 should trigger around 100ms.
+    std::this_thread::sleep_until(startTime + 100ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_FALSE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 100ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    // Cancel task 4 before it gets a chance to run.
+    thread.cancelTask(handle4);
+
+    // Tasks 2 and 5 should trigger around 200ms.
+    std::this_thread::sleep_until(startTime + 200ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_FALSE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_FALSE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 200ms + kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 0 should trigger around 300ms.
+    std::this_thread::sleep_until(startTime + 300ms - kJitter);
+    ASSERT_FALSE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 300ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    // Task 3 should trigger around 400ms.
+    std::this_thread::sleep_until(startTime + 400ms - kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_FALSE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+
+    std::this_thread::sleep_until(startTime + 400ms + kJitter);
+    ASSERT_TRUE(taskRan[0]);
+    ASSERT_TRUE(taskRan[1]);
+    ASSERT_TRUE(taskRan[2]);
+    ASSERT_TRUE(taskRan[3]);
+    ASSERT_FALSE(taskRan[4]);
+    ASSERT_TRUE(taskRan[5]);
+}
+
+}  // namespace
+}  // namespace android
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
new file mode 100644
index 0000000..3c95798
--- /dev/null
+++ b/media/utils/TimerThread.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TimerThread"
+
+#include <optional>
+
+#include <mediautils/TimerThread.h>
+#include <utils/ThreadDefs.h>
+
+namespace android {
+
+TimerThread::TimerThread() : mThread([this] { threadFunc(); }) {
+    pthread_setname_np(mThread.native_handle(), "TimeCheckThread");
+    pthread_setschedprio(mThread.native_handle(), PRIORITY_URGENT_AUDIO);
+}
+
+TimerThread::~TimerThread() {
+    {
+        std::lock_guard _l(mMutex);
+        mShouldExit = true;
+        mCond.notify_all();
+    }
+    mThread.join();
+}
+
+TimerThread::Handle TimerThread::scheduleTaskAtDeadline(std::function<void()>&& func,
+                                                        TimePoint deadline) {
+    std::lock_guard _l(mMutex);
+
+    // To avoid key collisions, advance by 1 tick until the key is unique.
+    for (; mMonitorRequests.find(deadline) != mMonitorRequests.end();
+         deadline += TimePoint::duration(1))
+        ;
+    mMonitorRequests.emplace(deadline, std::move(func));
+    mCond.notify_all();
+    return deadline;
+}
+
+void TimerThread::cancelTask(Handle handle) {
+    std::lock_guard _l(mMutex);
+    mMonitorRequests.erase(handle);
+}
+
+void TimerThread::threadFunc() {
+    std::unique_lock _l(mMutex);
+
+    while (!mShouldExit) {
+        if (!mMonitorRequests.empty()) {
+            TimePoint nextDeadline = mMonitorRequests.begin()->first;
+            if (nextDeadline < std::chrono::steady_clock::now()) {
+                // Deadline expired.
+                mMonitorRequests.begin()->second();
+                mMonitorRequests.erase(mMonitorRequests.begin());
+            }
+            mCond.wait_until(_l, nextDeadline);
+        } else {
+            mCond.wait(_l);
+        }
+    }
+}
+
+}  // namespace android
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 5ba6d7c..2411f97 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -14,62 +14,33 @@
  * limitations under the License.
  */
 
+#pragma once
 
-#ifndef ANDROID_TIME_CHECK_H
-#define ANDROID_TIME_CHECK_H
-
-#include <utils/KeyedVector.h>
-#include <utils/Thread.h>
 #include <vector>
 
+#include <mediautils/TimerThread.h>
+
 namespace android {
 
 // A class monitoring execution time for a code block (scoped variable) and causing an assert
 // if it exceeds a certain time
 
 class TimeCheck {
-public:
-
+  public:
     // The default timeout is chosen to be less than system server watchdog timeout
     static constexpr uint32_t kDefaultTimeOutMs = 5000;
 
-            TimeCheck(const char *tag, uint32_t timeoutMs = kDefaultTimeOutMs);
-            ~TimeCheck();
-    static  void setAudioHalPids(const std::vector<pid_t>& pids);
-    static  std::vector<pid_t> getAudioHalPids();
+    TimeCheck(const char* tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+    ~TimeCheck();
+    static void setAudioHalPids(const std::vector<pid_t>& pids);
+    static std::vector<pid_t> getAudioHalPids();
 
-private:
-
-    class TimeCheckThread : public Thread {
-    public:
-
-                            TimeCheckThread() {}
-        virtual             ~TimeCheckThread() override;
-
-                nsecs_t     startMonitoring(const char *tag, uint32_t timeoutMs);
-                void        stopMonitoring(nsecs_t endTimeNs);
-
-    private:
-
-                // RefBase
-        virtual void        onFirstRef() override { run("TimeCheckThread", PRIORITY_URGENT_AUDIO); }
-
-                // Thread
-        virtual bool        threadLoop() override;
-
-                Condition           mCond;
-                Mutex               mMutex;
-                // using the end time in ns as key is OK given the risk is low that two entries
-                // are added in such a way that <add time> + <timeout> are the same for both.
-                KeyedVector< nsecs_t, const char*>  mMonitorRequests;
-    };
-
-    static sp<TimeCheckThread> getTimeCheckThread();
+  private:
+    static TimerThread* getTimeCheckThread();
     static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
+    static void crash(const char* tag);
 
-    const           nsecs_t mEndTimeNs;
+    const TimerThread::Handle mTimerHandle;
 };
 
-}; // namespace android
-
-#endif  // ANDROID_TIME_CHECK_H
+};  // namespace android
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
new file mode 100644
index 0000000..cf457b8
--- /dev/null
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <functional>
+#include <map>
+#include <mutex>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+/**
+ * A thread for deferred execution of tasks, with cancellation.
+ */
+class TimerThread {
+  public:
+    using Handle = std::chrono::steady_clock::time_point;
+
+    TimerThread();
+    ~TimerThread();
+
+    /**
+     * Schedule a task to be executed in the future (`timeout` duration from now).
+     * Returns a handle that can be used for cancellation.
+     */
+    template <typename R, typename P>
+    Handle scheduleTask(std::function<void()>&& func, std::chrono::duration<R, P> timeout) {
+        auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(timeout);
+        return scheduleTaskAtDeadline(std::move(func), deadline);
+    }
+
+    /**
+     * Cancel a task, previously scheduled with scheduleTask().
+     * If the task has already executed, this is a no-op.
+     */
+    void cancelTask(Handle handle);
+
+  private:
+    using TimePoint = std::chrono::steady_clock::time_point;
+
+    std::condition_variable mCond;
+    std::mutex mMutex;
+    std::thread mThread;
+    std::map<TimePoint, std::function<void()>> mMonitorRequests GUARDED_BY(mMutex);
+    bool mShouldExit GUARDED_BY(mMutex) = false;
+
+    void threadFunc();
+    Handle scheduleTaskAtDeadline(std::function<void()>&& func, TimePoint deadline);
+};
+
+}  // namespace android
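
A usage sketch of the new interface, mirroring the unit test above (the function name and the 50ms value are illustrative): scheduleTask() returns the de-collided deadline as a Handle, and cancelTask() is a no-op if the task has already run.

#include <atomic>
#include <chrono>

#include <mediautils/TimerThread.h>

using namespace std::chrono_literals;

void example() {
    std::atomic<bool> fired = false;
    android::TimerThread timer;
    // Fires in ~50ms unless cancelled first.
    android::TimerThread::Handle handle =
            timer.scheduleTask([&fired] { fired = true; }, 50ms);
    // ... the guarded work finishes in time ...
    timer.cancelTask(handle);  // safe even if the task already fired
}
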
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index daed5fc..5ebf483 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -565,6 +565,12 @@
             : mChain(owner)
             , mThread(thread)
             , mAudioFlinger(*gAudioFlinger) {
+            sp<ThreadBase> base = thread.promote();
+            if (base != nullptr) {
+                mThreadType = base->type();
+            } else {
+                mThreadType = ThreadBase::MIXER;  // ensure a consistent value.
+            }
         }
 
         status_t createEffectHal(const effect_uuid_t *pEffectUuid,
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e0c5fa5..616fd78 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1405,6 +1405,60 @@
             .content_type = mAttr.content_type,
             .gain = mFinalVolume,
     };
+
+    // When attributes are undefined, derive default values from stream type.
+    // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
+    if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
+        switch (mStreamType) {
+        case AUDIO_STREAM_VOICE_CALL:
+            metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_SYSTEM:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_RING:
+            metadata.base.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_MUSIC:
+            metadata.base.usage = AUDIO_USAGE_MEDIA;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+            break;
+        case AUDIO_STREAM_ALARM:
+            metadata.base.usage = AUDIO_USAGE_ALARM;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_NOTIFICATION:
+            metadata.base.usage = AUDIO_USAGE_NOTIFICATION;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_DTMF:
+            metadata.base.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+            break;
+        case AUDIO_STREAM_ACCESSIBILITY:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_ASSISTANT:
+            metadata.base.usage = AUDIO_USAGE_ASSISTANT;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        case AUDIO_STREAM_REROUTING:
+            metadata.base.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
+            // unknown content type
+            break;
+        case AUDIO_STREAM_CALL_ASSISTANT:
+            metadata.base.usage = AUDIO_USAGE_CALL_ASSISTANT;
+            metadata.base.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+            break;
+        default:
+            break;
+        }
+    }
+
     metadata.channel_mask = mChannelMask,
     strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
     *backInserter++ = metadata;
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index 227c2d8..1f23ae3 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -30,6 +30,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "libcutils",
         "libhidlbase",
         "liblog",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index cf699d3..856ae66 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -54,7 +54,7 @@
     DeviceVector supportedDevices() const  {
         return mProfile != nullptr ? mProfile->getSupportedDevices() :  DeviceVector(); }
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     audio_io_handle_t   mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
     wp<AudioPolicyMix>  mPolicyMix;                   // non NULL when used by a dynamic policy
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index d06496d..69082ac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -149,7 +149,7 @@
                           AudioPolicyClientInterface *clientInterface);
     virtual ~AudioOutputDescriptor() {}
 
-    void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     void        log(const char* indent);
 
     virtual DeviceVector devices() const { return mDevices; }
@@ -336,7 +336,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~SwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const override;
     virtual DeviceVector devices() const;
     void setDevices(const DeviceVector &devices) { mDevices = devices; }
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
@@ -441,7 +441,7 @@
                             AudioPolicyClientInterface *clientInterface);
     virtual ~HwAudioOutputDescriptor() {}
 
-            void dump(String8 *dst) const override;
+    void dump(String8 *dst, int spaces, const char* extraInfo) const override;
 
     virtual bool setVolume(float volumeDb,
                            VolumeSource volumeSource, const StreamTypeVector &streams,
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 74b3405..e421c94 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -269,10 +269,11 @@
     size_t getClientCount() const {
         return mClients.size();
     }
-    virtual void dump(String8 *dst) const {
+    virtual void dump(String8 *dst, int spaces, const char* extraInfo = nullptr) const {
+        (void)extraInfo;
         size_t index = 0;
         for (const auto& client: getClientIterable()) {
-            client->dump(dst, 2, index++);
+            client->dump(dst, spaces, index++);
         }
     }
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 9ba745a..436fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -80,6 +80,7 @@
 
     sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
     DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
+    const AudioRouteVector& getRoutes() const { return mRoutes; }
     void setRoutes(const AudioRouteVector &routes);
 
     status_t addOutputProfile(const sp<IOProfile> &profile);
@@ -114,7 +115,7 @@
                        const sp<PolicyAudioPort> &dstPort) const;
 
     // TODO remove from here (split serialization)
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
 
 private:
     void refreshSupportedDevices();
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 2e680e3..90b812d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -97,7 +97,7 @@
                              uint32_t flags,
                              bool exactMatchRequiredForInputFlags = false) const;
 
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces) const;
     void log();
 
     bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index cd10010..0fe5c16 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioCollections"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include "AudioCollections.h"
 #include "AudioRoute.h"
 #include "HwModule.h"
@@ -40,10 +42,11 @@
     if (audioRouteVector.isEmpty()) {
         return;
     }
-    dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+    dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
     for (size_t i = 0; i < audioRouteVector.size(); i++) {
-        dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
-        audioRouteVector.itemAt(i)->dump(dst, 4);
+        const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+        dst->append(prefix.c_str());
+        audioRouteVector.itemAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 6c1240b..966b8cb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioInputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <audiomanager/AudioManager.h>
 #include <media/AudioPolicy.h>
 #include <policy.h>
@@ -508,17 +510,20 @@
     }
 }
 
-void AudioInputDescriptor::dump(String8 *dst) const
+void AudioInputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", getId());
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %d\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices %s\n", mDevice->toString(true /*includeSensitiveInfo*/).c_str());
-    mEnabledEffects.dump(dst, 1 /*spaces*/, false /*verbose*/);
-    dst->append(" AudioRecord Clients:\n");
-    ClientMapHandler<RecordClientDescriptor>::dump(dst);
-    dst->append("\n");
+    dst->appendFormat("Port ID: %d%s%s\n",
+            getId(), extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            mDevice->toString(true /*includeSensitiveInfo*/).c_str());
+    mEnabledEffects.dump(dst, spaces /*spaces*/, false /*verbose*/);
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*sAudioRecord Clients (%zu):\n", spaces, "", getClientCount());
+        ClientMapHandler<RecordClientDescriptor>::dump(dst, spaces);
+        dst->append("\n");
+    }
 }
 
 bool AudioInputCollection::isSourceActive(audio_source_t source) const
@@ -606,10 +611,12 @@
 
 void AudioInputCollection::dump(String8 *dst) const
 {
-    dst->append("\nInputs dump:\n");
+    dst->appendFormat("\n Inputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Input %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index b054c6d..1ae66de 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -17,6 +17,8 @@
 #define LOG_TAG "APM::AudioOutputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
+
 #include <AudioPolicyInterface.h>
 #include "AudioOutputDescriptor.h"
 #include "AudioPolicyMix.h"
@@ -243,32 +245,42 @@
         return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
 }
 
-void AudioOutputDescriptor::dump(String8 *dst) const
+void AudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" ID: %d\n", mId);
-    dst->appendFormat(" Sampling rate: %d\n", mSamplingRate);
-    dst->appendFormat(" Format: %08x\n", mFormat);
-    dst->appendFormat(" Channels: %08x\n", mChannelMask);
-    dst->appendFormat(" Devices: %s\n", devices().toString(true /*includeSensitiveInfo*/).c_str());
-    dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
-    for (const auto &iter : mRoutingActivities) {
-        dst->appendFormat(" Product Strategy id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    dst->appendFormat("Port ID: %d%s%s\n",
+            mId, extraInfo != nullptr ? "; " : "", extraInfo != nullptr ? extraInfo : "");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mFormat), mSamplingRate, mChannelMask);
+    dst->appendFormat("%*sDevices: %s\n", spaces, "",
+            devices().toString(true /*includeSensitiveInfo*/).c_str());
+    dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
+    if (!mRoutingActivities.empty()) {
+        dst->appendFormat("%*sProduct Strategies (%zu):\n", spaces, "", mRoutingActivities.size());
+        for (const auto &iter : mRoutingActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    for (const auto &iter : mVolumeActivities) {
-        dst->appendFormat(" Volume Activities id: %d", iter.first);
-        iter.second.dump(dst, 4);
+    if (!mVolumeActivities.empty()) {
+        dst->appendFormat("%*sVolume Activities (%zu):\n", spaces, "", mVolumeActivities.size());
+        for (const auto &iter : mVolumeActivities) {
+            dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
+            iter.second.dump(dst, 0);
+        }
     }
-    dst->append(" AudioTrack Clients:\n");
-    ClientMapHandler<TrackClientDescriptor>::dump(dst);
-    dst->append("\n");
+    if (getClientCount() != 0) {
+        dst->appendFormat("%*sAudioTrack Clients (%zu):\n", spaces, "", getClientCount());
+        ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
+        dst->append("\n");
+    }
     if (!mActiveClients.empty()) {
-        dst->append(" AudioTrack active (stream) clients:\n");
+        dst->appendFormat("%*sAudioTrack active (stream) clients (%zu):\n", spaces, "",
+                mActiveClients.size());
         size_t index = 0;
         for (const auto& client : mActiveClients) {
-            client->dump(dst, 2, index++);
+            client->dump(dst, spaces, index++);
         }
-        dst->append(" \n");
+        dst->append("\n");
     }
 }
 
@@ -292,11 +304,18 @@
     }
 }
 
-void SwAudioOutputDescriptor::dump(String8 *dst) const
+void SwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    dst->appendFormat(" Latency: %d\n", mLatency);
-    dst->appendFormat(" Flags %08x\n", mFlags);
-    AudioOutputDescriptor::dump(dst);
+    String8 allExtraInfo;
+    if (extraInfo != nullptr) {
+        allExtraInfo.appendFormat("%s; ", extraInfo);
+    }
+    std::string flagsLiteral = toString(mFlags);
+    allExtraInfo.appendFormat("Latency: %d; 0x%04x", mLatency, mFlags);
+    if (!flagsLiteral.empty()) {
+        allExtraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
+    }
+    AudioOutputDescriptor::dump(dst, spaces, allExtraInfo.c_str());
 }
 
 DeviceVector SwAudioOutputDescriptor::devices() const
@@ -685,11 +704,11 @@
 {
 }
 
-void HwAudioOutputDescriptor::dump(String8 *dst) const
+void HwAudioOutputDescriptor::dump(String8 *dst, int spaces, const char* extraInfo) const
 {
-    AudioOutputDescriptor::dump(dst);
-    dst->append("Source:\n");
-    mSource->dump(dst, 0, 0);
+    AudioOutputDescriptor::dump(dst, spaces, extraInfo);
+    dst->appendFormat("%*sSource:\n", spaces, "");
+    mSource->dump(dst, spaces, 0);
 }
 
 void HwAudioOutputDescriptor::toAudioPortConfig(
@@ -862,10 +881,12 @@
 
 void SwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
@@ -884,10 +905,12 @@
 
 void HwAudioOutputCollection::dump(String8 *dst) const
 {
-    dst->append("\nOutputs dump:\n");
+    dst->appendFormat("\n Outputs (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Output %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        const std::string extraInfo = base::StringPrintf("I/O handle: %d", keyAt(i));
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size(), extraInfo.c_str());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 866417e..53cc473 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -25,15 +25,16 @@
 
 void AudioRoute::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
-    dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
+    dst->appendFormat("%s; Sink: \"%s\"\n",
+            mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix", mSink->getTagName().c_str());
     if (mSources.size() != 0) {
-        dst->appendFormat("%*s- Sources: \n", spaces, "");
+        dst->appendFormat("%*sSources: ", spaces, "");
         for (size_t i = 0; i < mSources.size(); i++) {
-            dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
+            dst->appendFormat("\"%s\"", mSources[i]->getTagName().c_str());
+            if (i + 1 < mSources.size()) dst->append(", ");
         }
+        dst->append("\n");
     }
-    dst->append("\n");
 }
 
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 141c2be..d76d0c2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -180,7 +180,7 @@
 {
     String8 extraInfo;
     if (!mTagName.empty()) {
-        extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+        extraInfo.appendFormat("\"%s\"", mTagName.c_str());
     }
 
     std::string descBaseDumpStr;
@@ -447,7 +447,7 @@
     if (isEmpty()) {
         return;
     }
-    dst->appendFormat("%*s- %s devices:\n", spaces, "", tag.string());
+    dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
     for (size_t i = 0; i < size(); i++) {
         itemAt(i)->dump(dst, spaces + 2, i, verbose);
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 3a143b0..2977f38 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -17,11 +17,13 @@
 #define LOG_TAG "APM::HwModule"
 //#define LOG_NDEBUG 0
 
-#include "HwModule.h"
-#include "IOProfile.h"
+#include <android-base/stringprintf.h>
 #include <policy.h>
 #include <system/audio.h>
 
+#include "HwModule.h"
+#include "IOProfile.h"
+
 namespace android {
 
 HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
@@ -247,28 +249,28 @@
     return false;
 }
 
-void HwModule::dump(String8 *dst) const
+void HwModule::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("  - name: %s\n", getName());
-    dst->appendFormat("  - handle: %d\n", mHandle);
-    dst->appendFormat("  - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
+    dst->appendFormat("Handle: %d; \"%s\"\n", mHandle, getName());
     if (mOutputProfiles.size()) {
-        dst->append("  - outputs:\n");
+        dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
         for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-            dst->appendFormat("    output %zu:\n", i);
-            mOutputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mOutputProfiles[i]->dump(dst, prefix.size());
         }
     }
     if (mInputProfiles.size()) {
-        dst->append("  - inputs:\n");
+        dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
         for (size_t i = 0; i < mInputProfiles.size(); i++) {
-            dst->appendFormat("    input %zu:\n", i);
-            mInputProfiles[i]->dump(dst);
+            const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+            dst->append(prefix.c_str());
+            mInputProfiles[i]->dump(dst, prefix.size());
         }
     }
-    mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
-    mDynamicDevices.dump(dst, String8("Dynamic"),  2, true);
-    dumpAudioRouteVector(mRoutes, dst, 2);
+    mDeclaredDevices.dump(dst, String8("- Declared"), spaces - 2, true);
+    mDynamicDevices.dump(dst, String8("- Dynamic"),  spaces - 2, true);
+    dumpAudioRouteVector(mRoutes, dst, spaces);
 }
 
 sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -462,10 +464,11 @@
 
 void HwModuleCollection::dump(String8 *dst) const
 {
-    dst->append("\nHW Modules dump:\n");
+    dst->appendFormat("\n Hardware modules (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- HW Module %zu:\n", i + 1);
-        itemAt(i)->dump(dst);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->append(prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 624ad95..21f2018 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -116,28 +116,30 @@
                 return device == deviceDesc && deviceDesc->hasCurrentEncodedFormat(); }) == 1;
 }
 
-void IOProfile::dump(String8 *dst) const
+void IOProfile::dump(String8 *dst, int spaces) const
 {
-    std::string portStr;
-    AudioPort::dump(&portStr, 4);
-    dst->append(portStr.c_str());
-
-    dst->appendFormat("    - flags: 0x%04x", getFlags());
+    String8 extraInfo;
+    extraInfo.appendFormat("0x%04x", getFlags());
     std::string flagsLiteral =
             getRole() == AUDIO_PORT_ROLE_SINK ?
             toString(static_cast<audio_input_flags_t>(getFlags())) :
             getRole() == AUDIO_PORT_ROLE_SOURCE ?
             toString(static_cast<audio_output_flags_t>(getFlags())) : "";
     if (!flagsLiteral.empty()) {
-        dst->appendFormat(" (%s)", flagsLiteral.c_str());
+        extraInfo.appendFormat(" (%s)", flagsLiteral.c_str());
     }
-    dst->append("\n");
-    mSupportedDevices.dump(dst, String8("Supported"), 4, false);
-    dst->appendFormat("\n    - maxOpenCount: %u - curOpenCount: %u\n",
-             maxOpenCount, curOpenCount);
-    dst->appendFormat("    - maxActiveCount: %u - curActiveCount: %u\n",
-             maxActiveCount, curActiveCount);
-    dst->appendFormat("    - recommendedMuteDurationMs: %u ms\n", recommendedMuteDurationMs);
+
+    std::string portStr;
+    AudioPort::dump(&portStr, spaces, extraInfo.c_str());
+    dst->append(portStr.c_str());
+
+    mSupportedDevices.dump(dst, String8("- Supported"), spaces - 2, false);
+    dst->appendFormat("%*s- maxOpenCount: %u; curOpenCount: %u\n",
+            spaces - 2, "", maxOpenCount, curOpenCount);
+    dst->appendFormat("%*s- maxActiveCount: %u; curActiveCount: %u\n",
+            spaces - 2, "", maxActiveCount, curActiveCount);
+    dst->appendFormat("%*s- recommendedMuteDurationMs: %u ms\n",
+            spaces - 2, "", recommendedMuteDurationMs);
 }
 
 void IOProfile::log()
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index a747822..dc8d9cf 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -41,8 +41,9 @@
         "libaudiopolicyengineconfigurable_pfwwrapper",
 
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 7f9c0ac..4671fe9 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -31,6 +31,7 @@
     ],
     shared_libs: [
         "libaudiofoundation",
+        "libbase",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 0b63d33..22eeadd 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -3583,7 +3583,7 @@
 void AudioPolicyManager::dump(String8 *dst) const
 {
     dst->appendFormat("\nAudioPolicyManager Dump: %p\n", this);
-    dst->appendFormat(" Primary Output: %d\n",
+    dst->appendFormat(" Primary Output I/O handle: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
     std::string stateLiteral;
     AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
@@ -3608,8 +3608,8 @@
     dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
     dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
 
-    mAvailableOutputDevices.dump(dst, String8("Available output"));
-    mAvailableInputDevices.dump(dst, String8("Available input"));
+    mAvailableOutputDevices.dump(dst, String8("\n Available output"));
+    mAvailableInputDevices.dump(dst, String8("\n Available input"));
     mHwModulesAll.dump(dst);
     mOutputs.dump(dst);
     mInputs.dump(dst);
@@ -4116,7 +4116,7 @@
                     }
                     if (outputDesc != nullptr) {
                         audio_port_config srcMixPortConfig = {};
-                        outputDesc->toAudioPortConfig(&srcMixPortConfig, &patch->sources[0]);
+                        outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
                         // for volume control, we may need a valid stream
                         srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
                                     sourceDesc->stream() : AUDIO_STREAM_PATCH;
@@ -5973,14 +5973,20 @@
                     client->getSecondaryOutputs().begin(),
                     client->getSecondaryOutputs().end(),
                     secondaryDescs.begin(), secondaryDescs.end())) {
-                std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
-                std::vector<audio_io_handle_t> secondaryOutputIds;
-                for (const auto& secondaryDesc : secondaryDescs) {
-                    secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
-                    weakSecondaryDescs.push_back(secondaryDesc);
+                if (!audio_is_linear_pcm(client->config().format)) {
+                    // If the format is not PCM, the tracks should be invalidated to get correct
+                    // behavior when the secondary output is changed.
+                    streamsToInvalidate.insert(client->stream());
+                } else {
+                    std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+                    std::vector<audio_io_handle_t> secondaryOutputIds;
+                    for (const auto &secondaryDesc: secondaryDescs) {
+                        secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+                        weakSecondaryDescs.push_back(secondaryDesc);
+                    }
+                    trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+                    client->setSecondaryOutputs(std::move(weakSecondaryDescs));
                 }
-                trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
-                client->setSecondaryOutputs(std::move(weakSecondaryDescs));
             }
         }
     }
@@ -6148,11 +6154,11 @@
     uid_t uid;
     sp<RecordClientDescriptor> topClient = inputDesc->getHighestPriorityClient();
     if (topClient != nullptr) {
-      attributes = topClient->attributes();
-      uid = topClient->uid();
+        attributes = topClient->attributes();
+        uid = topClient->uid();
     } else {
-      attributes = { .source = AUDIO_SOURCE_DEFAULT };
-      uid = 0;
+        attributes = { .source = AUDIO_SOURCE_DEFAULT };
+        uid = 0;
     }
 
     if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
@@ -6952,8 +6958,8 @@
 {
     audio_mode_t mode = mEngine->getPhoneState();
     return (mode == AUDIO_MODE_IN_CALL)
-            || (mode == AUDIO_MODE_IN_COMMUNICATION)
-            || (mode == AUDIO_MODE_CALL_SCREEN);
+            || (mode == AUDIO_MODE_CALL_SCREEN)
+            || (mode == AUDIO_MODE_CALL_REDIRECT);
 }
 
 void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 369c587..f7442cb 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -219,7 +219,15 @@
     // can be interleaved).
     Mutex::Autolock _l(mLock);
     // TODO: check if it is more appropriate to do it in platform specific policy manager
-    AudioSystem::setMode(state);
+
+    // Convert the call redirect modes to modes understood by the audio HAL.
+    audio_mode_t halMode = state;
+    if (state == AUDIO_MODE_CALL_REDIRECT) {
+        halMode = AUDIO_MODE_CALL_SCREEN;
+    } else if (state == AUDIO_MODE_COMMUNICATION_REDIRECT) {
+        halMode = AUDIO_MODE_NORMAL;
+    }
+    AudioSystem::setMode(halMode);
 
     AutoCallerClear acc;
     mAudioPolicyManager->setPhoneState(state);
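
The conversion above could equally be expressed as a small helper; a sketch of the same mapping follows (hypothetical refactor, not part of this change).

// Hypothetical helper equivalent to the inline conversion above: the HAL is
// told CALL_SCREEN for call redirection and NORMAL for communication
// redirection, while the policy manager still sees the original state.
static audio_mode_t modeForHal(audio_mode_t state) {
    switch (state) {
        case AUDIO_MODE_CALL_REDIRECT:          return AUDIO_MODE_CALL_SCREEN;
        case AUDIO_MODE_COMMUNICATION_REDIRECT: return AUDIO_MODE_NORMAL;
        default:                                return state;
    }
}
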
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 26562e0..96da0ab 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -84,6 +84,7 @@
         "device3/Camera3OutputUtils.cpp",
         "device3/Camera3DeviceInjectionMethods.cpp",
         "device3/UHRCropAndMeteringRegionMapper.cpp",
+        "device3/PreviewFrameScheduler.cpp",
         "gui/RingBufferConsumer.cpp",
         "hidl/AidlCameraDeviceCallbacks.cpp",
         "hidl/AidlCameraServiceListener.cpp",
@@ -107,6 +108,7 @@
     ],
 
     shared_libs: [
+        "libandroid",
         "libbase",
         "libdl",
         "libexif",
@@ -154,6 +156,7 @@
         "android.hardware.camera.device@3.5",
         "android.hardware.camera.device@3.6",
         "android.hardware.camera.device@3.7",
+        "android.hardware.camera.device@3.8",
         "media_permission-aidl-cpp",
     ],
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 425c50d..2551ea5 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,7 @@
 #include <sys/types.h>
 #include <inttypes.h>
 #include <pthread.h>
+#include <poll.h>
 
 #include <android/hardware/ICamera.h>
 #include <android/hardware/ICameraClient.h>
@@ -910,6 +911,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             if (effectiveApiLevel == API_1) { // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
                 *client = new Camera2Client(cameraService, tmp, packageName, featureId,
@@ -2545,6 +2547,7 @@
         case CAMERA_DEVICE_API_VERSION_3_5:
         case CAMERA_DEVICE_API_VERSION_3_6:
         case CAMERA_DEVICE_API_VERSION_3_7:
+        case CAMERA_DEVICE_API_VERSION_3_8:
             ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
                     __FUNCTION__, id.string());
             *isSupported = true;
@@ -4269,6 +4272,11 @@
 
     if (dumpVector.empty()) { return; }
 
+    const String16 &packageName = client->getPackageName();
+
+    String8 packageName8 = String8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName.size());
+
     std::string dumpString;
     size_t i = dumpVector.size();
 
@@ -4277,10 +4285,12 @@
          i--;
          dumpString += cameraId;
          dumpString += ":";
+         dumpString += printablePackageName;
+         dumpString += "  ";
          dumpString += dumpVector[i]; // implicitly ends with '\n'
     }
 
-    const String16 &packageName = client->getPackageName();
+    packageName8.unlockBuffer();
     mWatchedClientsDumpCache[packageName] = dumpString;
 }
 
@@ -4589,7 +4599,7 @@
     } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
         return handleSetCameraMute(args);
     } else if (args.size() >= 2 && args[0] == String16("watch")) {
-        return handleWatchCommand(args, out);
+        return handleWatchCommand(args, in, out);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
         return OK;
@@ -4736,26 +4746,29 @@
     return OK;
 }
 
-status_t CameraService::handleWatchCommand(const Vector<String16>& args, int outFd) {
+status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
     if (args.size() >= 3 && args[1] == String16("start")) {
         return startWatchingTags(args, outFd);
-    } else if (args.size() == 2 && args[1] == String16("dump")) {
-        return camera3::CameraTraces::dump(outFd);
     } else if (args.size() == 2 && args[1] == String16("stop")) {
         return stopWatchingTags(outFd);
-    } else if (args.size() >= 2 && args[1] == String16("print")) {
-        return printWatchedTags(args, outFd);
+    } else if (args.size() == 2 && args[1] == String16("dump")) {
+        return printWatchedTags(outFd);
+    } else if (args.size() >= 2 && args[1] == String16("live")) {
+        return printWatchedTagsUntilInterrupt(args, inFd, outFd);
+    } else if (args.size() == 2 && args[1] == String16("clear")) {
+        return clearCachedMonitoredTagDumps(outFd);
     }
     dprintf(outFd, "Camera service watch commands:\n"
                  "  start -m <comma_separated_tag_list> [-c <comma_separated_client_list>]\n"
                  "        starts watching the provided tags for clients with provided package\n"
                  "        recognizes tag shorthands like '3a'\n"
                  "        watches all clients if no client is passed, or if 'all' is listed\n"
-                 "  dump dumps camera trace\n"
+                 "  dump dumps the monitoring information and exits\n"
                  "  stop stops watching all tags\n"
-                 "  print [-n <refresh_interval_ms>]\n"
+                 "  live [-n <refresh_interval_ms>]\n"
                  "        prints the monitored information in real time\n"
-                 "        Hit Ctrl+C to exit\n");
+                 "        Hit return to exit\n"
+                 "  clear clears all buffers storing information for the watch command\n");
   return BAD_VALUE;
 }
 
@@ -4823,9 +4836,177 @@
     return OK;
 }
 
-status_t CameraService::printWatchedTags(const Vector<String16> &args, int outFd) {
-    // Figure outFd refresh interval, if present in args
-    useconds_t refreshTimeoutMs = 1000; // refresh every 1s by default
+status_t CameraService::clearCachedMonitoredTagDumps(int outFd) {
+    Mutex::Autolock lock(mLogLock);
+    size_t clearedSize = mWatchedClientsDumpCache.size();
+    mWatchedClientsDumpCache.clear();
+    dprintf(outFd, "Cleared tag information of %zu cached clients.\n", clearedSize);
+    return OK;
+}
+
+status_t CameraService::printWatchedTags(int outFd) {
+    Mutex::Autolock logLock(mLogLock);
+    std::set<String16> connectedMonitoredClients;
+
+    bool printedSomething = false; // tracks if any monitoring information was printed
+                                   // (from either cached or active clients)
+
+    bool serviceLock = tryLock(mServiceLock);
+    // get all watched clients that are currently connected
+    for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
+        if (clientDescriptor == nullptr) { continue; }
+
+        sp<BasicClient> client = clientDescriptor->getValue();
+        if (client.get() == nullptr) { continue; }
+        if (!isClientWatchedLocked(client.get())) { continue; }
+
+        std::vector<std::string> dumpVector;
+        client->dumpWatchedEventsToVector(dumpVector);
+
+        size_t printIdx = dumpVector.size();
+        if (printIdx == 0) {
+            continue;
+        }
+
+        // Print tag dumps for active client
+        const String8 &cameraId = clientDescriptor->getKey();
+        String8 packageName8 = String8(client->getPackageName());
+        const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+        dprintf(outFd, "Client: %s (active)\n", printablePackageName);
+        while(printIdx > 0) {
+            printIdx--;
+            dprintf(outFd, "%s:%s  %s", cameraId.string(), printablePackageName,
+                    dumpVector[printIdx].c_str());
+        }
+        dprintf(outFd, "\n");
+        packageName8.unlockBuffer();
+        printedSomething = true;
+
+        connectedMonitoredClients.emplace(client->getPackageName());
+    }
+    if (serviceLock) { mServiceLock.unlock(); }
+
+    // Print entries in mWatchedClientsDumpCache for clients that are not connected
+    for (const auto &kv: mWatchedClientsDumpCache) {
+        const String16 &package = kv.first;
+        if (connectedMonitoredClients.find(package) != connectedMonitoredClients.end()) {
+            continue;
+        }
+
+        dprintf(outFd, "Client: %s (cached)\n", String8(package).string());
+        dprintf(outFd, "%s\n", kv.second.c_str());
+        printedSomething = true;
+    }
+
+    if (!printedSomething) {
+        dprintf(outFd, "No monitoring information to print.\n");
+    }
+
+    return OK;
+}
+
+// Print all events in vector `events' that came after lastPrintedEvent
+void printNewWatchedEvents(int outFd,
+                           const char *cameraId,
+                           const String16 &packageName,
+                           const std::vector<std::string> &events,
+                           const std::string &lastPrintedEvent) {
+    if (events.empty()) { return; }
+
+    // index of lastPrintedEvent in events.
+    // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
+    size_t lastPrintedIdx;
+    for (lastPrintedIdx = 0;
+         lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
+         lastPrintedIdx++);
+
+    if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
+
+    String8 packageName8(packageName);
+    const char *printablePackageName = packageName8.lockBuffer(packageName8.size());
+
+    // print events in chronological order (latest event last)
+    size_t idxToPrint = lastPrintedIdx;
+    do {
+        idxToPrint--;
+        dprintf(outFd, "%s:%s  %s", cameraId, printablePackageName, events[idxToPrint].c_str());
+    } while (idxToPrint != 0);
+
+    packageName8.unlockBuffer();
+}
+
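
printNewWatchedEvents() above relies on the convention, visible in both printers, that the event vector is ordered newest-first: events[0] is the most recent entry, and everything before the previously printed event is new. A toy sketch of that bookkeeping with hypothetical names (not the service code):

#include <cstdio>
#include <string>
#include <vector>

// events are ordered newest-first: events[0] is the most recent entry.
static void printNewEvents(const std::vector<std::string> &events,
                           std::string &lastPrinted /*in-out*/) {
    if (events.empty()) { return; }

    // Find the previously printed event; everything before it is new.
    size_t firstOld = 0;
    while (firstOld < events.size() && events[firstOld] != lastPrinted) { firstOld++; }
    if (firstOld == 0) { return; }  // nothing new

    // Print the new events oldest-to-newest so the output stays chronological.
    for (size_t i = firstOld; i-- > 0; ) {
        std::printf("%s\n", events[i].c_str());
    }
    lastPrinted = events[0];  // remember the newest event we have shown
}

int main() {
    std::string last;
    printNewEvents({"evt3", "evt2", "evt1"}, last);                  // prints evt1, evt2, evt3
    printNewEvents({"evt5", "evt4", "evt3", "evt2", "evt1"}, last);  // prints evt4, evt5
    return 0;
}

The caller then remembers events[0] as the new last-printed event, exactly as the live loop below does with packageNameToLastEvent.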
+// Returns true if the 'adb shell cmd' watch command should be interrupted, based on data in inFd.
+// The watch command should be interrupted if the user presses the return key, or if the user
+// loses any way to signal an interrupt.
+// If timeoutMs == 0, this function will always return false.
+bool shouldInterruptWatchCommand(int inFd, int outFd, long timeoutMs) {
+    struct timeval startTime;
+    int startTimeError = gettimeofday(&startTime, nullptr);
+    if (startTimeError) {
+        dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+        return true;
+    }
+
+    const nfds_t numFds = 1;
+    struct pollfd pollFd = { .fd = inFd, .events = POLLIN, .revents = 0 };
+
+    struct timeval currTime;
+    char buffer[2];
+    while(true) {
+        int currTimeError = gettimeofday(&currTime, nullptr);
+        if (currTimeError) {
+            dprintf(outFd, "Failed waiting for interrupt, aborting.\n");
+            return true;
+        }
+
+        long elapsedTimeMs = ((currTime.tv_sec - startTime.tv_sec) * 1000L)
+                + ((currTime.tv_usec - startTime.tv_usec) / 1000L);
+        int remainingTimeMs = (int) (timeoutMs - elapsedTimeMs);
+
+        if (remainingTimeMs <= 0) {
+            // No user interrupt within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        int numFdsUpdated = poll(&pollFd, numFds, remainingTimeMs);
+        if (numFdsUpdated < 0) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        if (numFdsUpdated == 0) {
+            // No user input within timeoutMs, don't interrupt watch command
+            return false;
+        }
+
+        if (!(pollFd.revents & POLLIN)) {
+            dprintf(outFd, "Failed while waiting for user input. Exiting.\n");
+            return true;
+        }
+
+        ssize_t sizeRead = read(inFd, buffer, sizeof(buffer) - 1);
+        if (sizeRead < 0) {
+            dprintf(outFd, "Error reading user input. Exiting.\n");
+            return true;
+        }
+
+        if (sizeRead == 0) {
+            // Reached end of input fd (can happen if input is piped)
+            // User has no way to signal an interrupt, so interrupt here
+            return true;
+        }
+
+        if (buffer[0] == '\n') {
+            // User pressed return, interrupt watch command.
+            return true;
+        }
+    }
+}
+
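
shouldInterruptWatchCommand() above amounts to: poll inFd for up to the refresh interval, treat a newline or EOF as an interrupt, and treat a timeout as "keep refreshing". A reduced standalone sketch of that idea with a single poll call and simplified error handling (the service version also recomputes the remaining time and keeps polling when the byte it reads is not a newline); waitForReturnKey() is a hypothetical name:

#include <poll.h>
#include <unistd.h>
#include <cstdio>

// Returns true once the user presses return (or stdin reaches EOF) within timeoutMs.
static bool waitForReturnKey(int timeoutMs) {
    struct pollfd pfd = {};
    pfd.fd = STDIN_FILENO;
    pfd.events = POLLIN;

    int n = poll(&pfd, /*nfds*/1, timeoutMs);
    if (n <= 0 || !(pfd.revents & POLLIN)) {
        return false;                       // timed out (or poll failed): keep refreshing
    }
    char c = 0;
    ssize_t bytes = read(STDIN_FILENO, &c, 1);
    return bytes == 0 /*EOF*/ || (bytes > 0 && c == '\n');
}

int main() {
    while (!waitForReturnKey(/*timeoutMs*/1000)) {
        std::printf("refreshing...\n");     // stands in for printing newly captured events
    }
    return 0;
}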
+status_t CameraService::printWatchedTagsUntilInterrupt(const Vector<String16> &args,
+                                                       int inFd, int outFd) {
+    // Figure out refresh interval, if present in args
+    long refreshTimeoutMs = 1000L; // refresh every 1s by default
     if (args.size() > 2) {
         size_t intervalIdx; // index of '-n'
         for (intervalIdx = 2; intervalIdx < args.size() && String16("-n") != args[intervalIdx];
@@ -4838,107 +5019,51 @@
         }
     }
 
-    mLogLock.lock();
-    bool serviceLock = tryLock(mServiceLock);
-    // get all watched clients that are currently connected
-    std::set<String16> connectedMoniterdClients;
-    for (const auto &clientDescriptor: mActiveClientManager.getAll()) {
-        if (clientDescriptor == nullptr) { continue; }
+    // Set min timeout of 10ms. This prevents edge cases in polling when timeout of 0 is passed.
+    refreshTimeoutMs = refreshTimeoutMs < 10 ? 10 : refreshTimeoutMs;
 
-        sp<BasicClient> client = clientDescriptor->getValue();
-        if (client.get() == nullptr) { continue; }
-        if (!isClientWatchedLocked(client.get())) { continue; }
+    dprintf(outFd, "Press return to exit...\n\n");
+    std::map<String16, std::string> packageNameToLastEvent;
 
-        connectedMoniterdClients.emplace(client->getPackageName());
-    }
-    if (serviceLock) { mServiceLock.unlock(); }
-
-    // Print entries in mWatchedClientsDumpCache for clients that are not connected
-    for (const auto &kv: mWatchedClientsDumpCache) {
-        const String16 &package = kv.first;
-        if (connectedMoniterdClients.find(package) != connectedMoniterdClients.end()) {
-            continue;
-        }
-
-        dprintf(outFd, "Client: %s\n", String8(package).string());
-        dprintf(outFd, "%s\n", kv.second.c_str());
-    }
-    mLogLock.unlock();
-
-    if (connectedMoniterdClients.empty()) {
-        dprintf(outFd, "No watched client active.\n");
-        return OK;
-    }
-
-    // For connected watched clients, print monitored tags live
-    return printWatchedTagsUntilInterrupt(refreshTimeoutMs  * 1000, outFd);
-}
-
-status_t CameraService::printWatchedTagsUntilInterrupt(useconds_t refreshMicros, int outFd) {
-    std::unordered_map<std::string, std::string> cameraToLastEvent;
-    auto cameraClients = mActiveClientManager.getAll();
-
-    if (cameraClients.empty()) {
-        dprintf(outFd, "No clients connected.\n");
-        return OK;
-    }
-
-    dprintf(outFd, "Press Ctrl + C to exit...\n\n");
     while (true) {
+        bool serviceLock = tryLock(mServiceLock);
+        auto cameraClients = mActiveClientManager.getAll();
+        if (serviceLock) { mServiceLock.unlock(); }
+
         for (const auto& clientDescriptor : cameraClients) {
             Mutex::Autolock lock(mLogLock);
             if (clientDescriptor == nullptr) { continue; }
-            const char* cameraId = clientDescriptor->getKey().string();
-
-            // This also initializes the map entries with an empty string
-            const std::string& lastPrintedEvent = cameraToLastEvent[cameraId];
 
             sp<BasicClient> client = clientDescriptor->getValue();
             if (client.get() == nullptr) { continue; }
             if (!isClientWatchedLocked(client.get())) { continue; }
 
+            const String16 &packageName = client->getPackageName();
+            // This also initializes the map entries with an empty string
+            const std::string& lastPrintedEvent = packageNameToLastEvent[packageName];
+
             std::vector<std::string> latestEvents;
             client->dumpWatchedEventsToVector(latestEvents);
 
             if (!latestEvents.empty()) {
+                String8 cameraId = clientDescriptor->getKey();
+                const char *printableCameraId = cameraId.lockBuffer(cameraId.size());
                 printNewWatchedEvents(outFd,
-                                      cameraId,
-                                      client->getPackageName(),
+                                      printableCameraId,
+                                      packageName,
                                       latestEvents,
                                       lastPrintedEvent);
-                cameraToLastEvent[cameraId] = latestEvents[0];
+                packageNameToLastEvent[packageName] = latestEvents[0];
+                cameraId.unlockBuffer();
             }
         }
-        usleep(refreshMicros);  // convert ms to us
+        if (shouldInterruptWatchCommand(inFd, outFd, refreshTimeoutMs)) {
+            break;
+        }
     }
     return OK;
 }
 
-void CameraService::printNewWatchedEvents(int outFd,
-                                          const char *cameraId,
-                                          const String16 &packageName,
-                                          const std::vector<std::string> &events,
-                                          const std::string &lastPrintedEvent) {
-    if (events.empty()) { return; }
-
-    // index of lastPrintedEvent in events.
-    // lastPrintedIdx = events.size() if lastPrintedEvent is not in events
-    size_t lastPrintedIdx;
-    for (lastPrintedIdx = 0;
-         lastPrintedIdx < events.size() && lastPrintedEvent != events[lastPrintedIdx];
-         lastPrintedIdx++);
-
-    if (lastPrintedIdx == 0) { return; } // early exit if no new event in `events`
-
-    const char *printPackageName = String8(packageName).string();
-    // print events in chronological order (latest event last)
-    size_t idxToPrint = lastPrintedIdx;
-    do {
-        idxToPrint--;
-        dprintf(outFd, "%s:%s  %s", cameraId, printPackageName, events[idxToPrint].c_str());
-    } while (idxToPrint != 0);
-}
-
 void CameraService::parseClientsToWatchLocked(String8 clients) {
     mWatchedClientPackages.clear();
 
@@ -4975,7 +5100,7 @@
         "      Valid values 0=OFF, 1=ON for JPEG\n"
         "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  set-camera-mute <0/1> enable or disable camera muting\n"
-        "  watch <start|stop|dump|print> manages tag monitoring in connected clients\n"
+        "  watch <start|stop|dump|live|clear> manages tag monitoring in connected clients\n"
         "  help print this message\n");
 }
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index cb26763..a3125c6 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -1172,7 +1172,7 @@
     status_t handleSetCameraMute(const Vector<String16>& args);
 
     // Handle 'watch' command as passed through 'cmd'
-    status_t handleWatchCommand(const Vector<String16> &args, int outFd);
+    status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
 
     // Enable tag monitoring of the given tags in provided clients
     status_t startWatchingTags(const Vector<String16> &args, int outFd);
@@ -1180,20 +1180,16 @@
     // Disable tag monitoring
     status_t stopWatchingTags(int outFd);
 
+    // Clears mWatchedClientsDumpCache
+    status_t clearCachedMonitoredTagDumps(int outFd);
+
     // Print events of monitored tags in all cached and attached clients
-    status_t printWatchedTags(const Vector<String16> &args, int outFd);
+    status_t printWatchedTags(int outFd);
 
     // Print events of monitored tags in all attached clients as they are captured. New events are
-    // fetched every `refreshMicros` us
-    // NOTE: This function does not terminate unless interrupted.
-    status_t printWatchedTagsUntilInterrupt(useconds_t refreshMicros, int outFd);
-
-    // Print all events in vector `events' that came after lastPrintedEvent
-    static void printNewWatchedEvents(int outFd,
-                                      const char *cameraId,
-                                      const String16 &packageName,
-                                      const std::vector<std::string> &events,
-                                      const std::string &lastPrintedEvent);
+    // fetched at the refresh interval given by the '-n' argument (default 1000 ms).
+    // NOTE: This function does not terminate until the user passes '\n' to inFd or inFd hits EOF.
+    status_t printWatchedTagsUntilInterrupt(const Vector<String16> &args, int inFd, int outFd);
 
     // Parses comma separated clients list and adds them to mWatchedClientPackages.
     // Does not acquire mLogLock before modifying mWatchedClientPackages. It is the caller's
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index fdb2673..f28d128 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -88,6 +88,7 @@
 #define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
 #define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
 #define CAMERA_DEVICE_API_VERSION_3_7 HARDWARE_DEVICE_API_VERSION(3, 7)
+#define CAMERA_DEVICE_API_VERSION_3_8 HARDWARE_DEVICE_API_VERSION(3, 8)
 
 /**
  * A manager for all camera providers available on an Android device.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index cbea9a2..3742a17 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1176,6 +1176,16 @@
 
 hardware::Return<void> Camera3Device::notify(
         const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_2::NotifyMsg>(msgs);
+}
+
+hardware::Return<void> Camera3Device::notify_3_8(
+        const hardware::hidl_vec<hardware::camera::device::V3_8::NotifyMsg>& msgs) {
+    return notifyHelper<hardware::camera::device::V3_8::NotifyMsg>(msgs);
+}
+
+template<typename NotifyMsgType>
+hardware::Return<void> Camera3Device::notifyHelper(const hardware::hidl_vec<NotifyMsgType>& msgs) {
     // Ideally we should grab mLock, but that can lead to deadlock, and
     // it's not super important to get up to date value of mStatus for this
     // warning print, hence skipping the lock here
@@ -5469,7 +5479,8 @@
                     outputBuffers->editItemAt(i).acquire_fence = -1;
                 }
                 outputBuffers->editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i],
+                        /*timestamp*/0, /*readoutTimestamp*/0,
                         /*timestampIncreasing*/true, std::vector<size_t> (),
                         captureRequest->mResultExtras.frameNumber);
             }
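
The notify()/notify_3_8() entry points added above forward to a single templated notifyHelper() so the shared handling is written once per message vector type. A minimal sketch of that shape, with toy message structs standing in for the HIDL NotifyMsg generations (Device, NotifyMsgV3_2 and NotifyMsgV3_8 here are illustrative, not the real classes):

#include <cstdio>
#include <vector>

// Toy stand-ins for the two HIDL NotifyMsg generations.
struct NotifyMsgV3_2 { int frameNumber; };
struct NotifyMsgV3_8 { int frameNumber; long readoutTimestamp; };

class Device {
  public:
    void notify(const std::vector<NotifyMsgV3_2>& msgs)     { notifyHelper(msgs); }
    void notify_3_8(const std::vector<NotifyMsgV3_8>& msgs) { notifyHelper(msgs); }

  private:
    // One body handles both vectors; only the element type differs.
    template <typename NotifyMsgType>
    void notifyHelper(const std::vector<NotifyMsgType>& msgs) {
        for (const auto& msg : msgs) {
            std::printf("notify: frame %d\n", msg.frameNumber);
        }
    }
};

int main() {
    Device d;
    d.notify({{1}, {2}});
    d.notify_3_8({{3, 1000L}, {4, 2000L}});
    return 0;
}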
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e0ea7a1..d08c41f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -39,6 +39,7 @@
 #include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
 #include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
 #include <fmq/MessageQueue.h>
 
 #include <camera/CaptureResult.h>
@@ -83,7 +84,7 @@
  */
 class Camera3Device :
             public CameraDeviceBase,
-            virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
+            virtual public hardware::camera::device::V3_8::ICameraDeviceCallback,
             public camera3::SetErrorInterface,
             public camera3::InflightRequestUpdateInterface,
             public camera3::RequestBufferInterface,
@@ -642,6 +643,14 @@
             const hardware::hidl_vec<
                     hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
 
+    hardware::Return<void> notify_3_8(
+            const hardware::hidl_vec<
+                    hardware::camera::device::V3_8::NotifyMsg>& msgs) override;
+
+    template<typename NotifyMsgType>
+    hardware::Return<void> notifyHelper(
+            const hardware::hidl_vec<NotifyMsgType>& msgs);
+
     // Handle one notify message
     void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
 
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index b121e5d..61e43cb 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -48,7 +48,7 @@
 
 status_t Camera3FakeStream::returnBufferLocked(
         const camera_stream_buffer &,
-        nsecs_t, int32_t, const std::vector<size_t>&) {
+        nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ATRACE_CALL();
     ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
@@ -56,7 +56,7 @@
 
 status_t Camera3FakeStream::returnBufferCheckedLocked(
             const camera_stream_buffer &,
-            nsecs_t,
+            nsecs_t, nsecs_t,
             bool,
             int32_t,
             const std::vector<size_t>&,
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index c11a3e4..df19c3d 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -108,6 +108,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -135,7 +136,8 @@
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
+            const std::vector<size_t>& surface_ids);
 
     virtual status_t configureQueueLocked();
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 7b128e0..f4b3197 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -224,6 +224,7 @@
 status_t Camera3IOStreamBase::returnAnyBufferLocked(
         const camera_stream_buffer &buffer,
         nsecs_t timestamp,
+        nsecs_t readoutTimestamp,
         bool output,
         int32_t transform,
         const std::vector<size_t>& surface_ids) {
@@ -242,7 +243,8 @@
     }
 
     sp<Fence> releaseFence;
-    res = returnBufferCheckedLocked(buffer, timestamp, output, transform, surface_ids,
+    res = returnBufferCheckedLocked(buffer, timestamp, readoutTimestamp,
+                                    output, transform, surface_ids,
                                     &releaseFence);
     // Res may be an error, but we still want to decrement our owned count
     // to enable clean shutdown. So we'll just return the error but otherwise
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 6135b7e..fb73c97 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -67,6 +67,7 @@
     status_t         returnAnyBufferLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
@@ -74,6 +75,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6eb798e..9a3f7ed 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -105,6 +105,7 @@
 status_t Camera3InputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t /*transform*/,
             const std::vector<size_t>&,
@@ -112,6 +113,7 @@
             sp<Fence> *releaseFenceOut) {
 
     (void)timestamp;
+    (void)readoutTimestamp;
     (void)output;
     ALOG_ASSERT(!output, "Expected output to be false");
 
@@ -176,7 +178,8 @@
         const camera_stream_buffer &buffer) {
     ATRACE_CALL();
 
-    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false, /*transform*/ -1);
+    return returnAnyBufferLocked(buffer, /*timestamp*/0, /*readoutTimestamp*/0,
+                                 /*output*/false, /*transform*/ -1);
 }
 
 status_t Camera3InputStream::getInputBufferProducerLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 6f66bca..5e0587b 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -61,6 +61,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 3738d01..0dfeac3 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -22,6 +22,7 @@
 #include <fstream>
 
 #include <android-base/unique_fd.h>
+#include <cutils/properties.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -263,14 +264,16 @@
 
 status_t Camera3OutputStream::returnBufferLocked(
         const camera_stream_buffer &buffer,
-        nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids) {
+        nsecs_t timestamp, nsecs_t readoutTimestamp,
+        int32_t transform, const std::vector<size_t>& surface_ids) {
     ATRACE_HFR_CALL();
 
     if (mHandoutTotalBufferCount == 1) {
         returnPrefetchedBuffersLocked();
     }
 
-    status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, transform, surface_ids);
+    status_t res = returnAnyBufferLocked(buffer, timestamp, readoutTimestamp,
+                                         /*output*/true, transform, surface_ids);
 
     if (res != OK) {
         return res;
@@ -285,6 +288,7 @@
 status_t Camera3OutputStream::returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -347,20 +351,6 @@
             mTraceFirstBuffer = false;
         }
 
-        if (transform != -1) {
-            setTransformLocked(transform);
-        }
-
-        /* Certain consumers (such as AudioSource or HardwareComposer) use
-         * MONOTONIC time, causing time misalignment if camera timestamp is
-         * in BOOTTIME. Do the conversion if necessary. */
-        res = native_window_set_buffers_timestamp(mConsumer.get(),
-                mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
-                  __FUNCTION__, mId, strerror(-res), res);
-            return res;
-        }
         // If this is a JPEG output, and image dump mask is set, save image to
         // disk.
         if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF &&
@@ -368,10 +358,32 @@
             dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
         }
 
-        res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
-        if (shouldLogError(res, state)) {
-            ALOGE("%s: Stream %d: Error queueing buffer to native window:"
-                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        /* Certain consumers (such as AudioSource or HardwareComposer) use
+         * MONOTONIC time, causing time misalignment if camera timestamp is
+         * in BOOTTIME. Do the conversion if necessary. */
+        nsecs_t t = mPreviewFrameScheduler != nullptr ? readoutTimestamp : timestamp;
+        nsecs_t adjustedTs = mUseMonoTimestamp ? t - mTimestampOffset : t;
+        if (mPreviewFrameScheduler != nullptr) {
+            res = mPreviewFrameScheduler->queuePreviewBuffer(adjustedTs, transform,
+                    anwBuffer, anwReleaseFence);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error queuing buffer to preview buffer scheduler: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+        } else {
+            setTransform(transform);
+            res = native_window_set_buffers_timestamp(mConsumer.get(), adjustedTs);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+                      __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+            res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
+            if (shouldLogError(res, state)) {
+                ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+                      " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+            }
         }
     }
     mLock.lock();
@@ -412,6 +424,9 @@
 
 status_t Camera3OutputStream::setTransformLocked(int transform) {
     status_t res = OK;
+
+    if (transform == -1) return res;
+
     if (mState == STATE_ERROR) {
         ALOGE("%s: Stream in error state", __FUNCTION__);
         return INVALID_OPERATION;
@@ -437,7 +452,7 @@
         return res;
     }
 
-    if ((res = configureConsumerQueueLocked()) != OK) {
+    if ((res = configureConsumerQueueLocked(true /*allowPreviewScheduler*/)) != OK) {
         return res;
     }
 
@@ -461,7 +476,7 @@
     return OK;
 }
 
-status_t Camera3OutputStream::configureConsumerQueueLocked() {
+status_t Camera3OutputStream::configureConsumerQueueLocked(bool allowPreviewScheduler) {
     status_t res;
 
     mTraceFirstBuffer = true;
@@ -547,6 +562,15 @@
     }
 
     mTotalBufferCount = maxConsumerBuffers + camera_stream::max_buffers;
+    if (allowPreviewScheduler && isConsumedByHWComposer()) {
+        // We cannot distinguish between a SurfaceView and an ImageReader of
+        // preview buffer format. The PreviewFrameScheduler needs to handle both.
+        if (!property_get_bool("camera.disable_preview_scheduler", false)) {
+            mPreviewFrameScheduler = std::make_unique<PreviewFrameScheduler>(*this, mConsumer);
+            mTotalBufferCount += PreviewFrameScheduler::kQueueDepthWatermark;
+        }
+    }
+
     mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
     mLastTimestamp = 0;
@@ -1185,6 +1209,11 @@
     }
 }
 
+bool Camera3OutputStream::shouldLogError(status_t res) {
+    Mutex::Autolock l(mLock);
+    return shouldLogError(res, mState);
+}
+
 }; // namespace camera3
 
 }; // namespace android
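
The reworked return path in returnBufferCheckedLocked() above chooses which timestamp the consumer sees: the readout timestamp when a PreviewFrameScheduler is attached, the start-of-exposure timestamp otherwise, minus the cached offset when the consumer expects MONOTONIC rather than BOOTTIME time. A small sketch of just that selection, as a free function with assumed inputs (chooseBufferTimestamp is a hypothetical name):

#include <cstdint>

using nsecs_t = int64_t;

// Selects the timestamp attached to a returned output buffer.
//  - usePreviewScheduler: the stream re-times frames, so the sensor readout time
//    is used as the reference instead of start of exposure
//  - useMonoTimestamp: the consumer expects MONOTONIC time, so the cached
//    BOOTTIME-to-MONOTONIC offset is subtracted
static nsecs_t chooseBufferTimestamp(bool usePreviewScheduler, bool useMonoTimestamp,
                                     nsecs_t startOfExposureNs, nsecs_t readoutNs,
                                     nsecs_t timestampOffsetNs) {
    nsecs_t t = usePreviewScheduler ? readoutNs : startOfExposureNs;
    return useMonoTimestamp ? t - timestampOffsetNs : t;
}

int main() {
    // 33 ms exposure start, readout 3 ms later, 7 ms BOOTTIME-MONOTONIC offset.
    return chooseBufferTimestamp(true, true, 33'000'000, 36'000'000, 7'000'000) == 29'000'000
            ? 0 : 1;
}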
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 0872687..a70b883 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -27,6 +27,7 @@
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
 #include "Camera3BufferManager.h"
+#include "PreviewFrameScheduler.h"
 
 namespace android {
 
@@ -229,6 +230,7 @@
     static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
 
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+    bool shouldLogError(status_t res);
 
   protected:
     Camera3OutputStream(int id, camera_stream_type_t type,
@@ -245,6 +247,7 @@
     virtual status_t returnBufferCheckedLocked(
             const camera_stream_buffer &buffer,
             nsecs_t timestamp,
+            nsecs_t readoutTimestamp,
             bool output,
             int32_t transform,
             const std::vector<size_t>& surface_ids,
@@ -255,7 +258,7 @@
 
     status_t getEndpointUsageForSurface(uint64_t *usage,
             const sp<Surface>& surface) const;
-    status_t configureConsumerQueueLocked();
+    status_t configureConsumerQueueLocked(bool allowPreviewScheduler);
 
     // Consumer as the output of camera HAL
     sp<Surface> mConsumer;
@@ -333,7 +336,8 @@
 
     virtual status_t returnBufferLocked(
             const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
+            nsecs_t timestamp, nsecs_t readoutTimestamp,
+            int32_t transform, const std::vector<size_t>& surface_ids);
 
     virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
             ANativeWindowBuffer* buffer, int anwReleaseFence,
@@ -370,6 +374,8 @@
 
     int mImageDumpMask = 0;
 
+    // The preview stream scheduler for re-timing frames
+    std::unique_ptr<PreviewFrameScheduler> mPreviewFrameScheduler;
 }; // class Camera3OutputStream
 
 } // namespace camera3
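
As the header above shows, the scheduler is an optional member of the output stream; the .cpp attaches it only for HWC-consumed streams and only when the camera.disable_preview_scheduler property does not opt out. A tiny sketch of that gating as a standalone predicate (assumes libcutils property_get_bool; shouldUsePreviewScheduler is a hypothetical name):

#include <cutils/properties.h>

// Whether a newly configured output stream should get a PreviewFrameScheduler.
// A SurfaceView and an ImageReader with a preview format look the same at this
// point, so the decision is based on the HWC-consumer bit plus a global opt-out.
static bool shouldUsePreviewScheduler(bool allowPreviewScheduler,
                                      bool consumedByHWComposer) {
    return allowPreviewScheduler && consumedByHWComposer &&
            !property_get_bool("camera.disable_preview_scheduler", false);
}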
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index d765b02..0d79b54 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -463,7 +463,7 @@
         returnOutputBuffers(
             states.useHalBufManager, states.listener,
             request.pendingOutputBuffers.array(),
-            request.pendingOutputBuffers.size(), 0,
+            request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
             /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
             /*timestampIncreasing*/true,
             request.outputSurfaces, request.resultExtras,
@@ -870,9 +870,9 @@
         bool useHalBufManager,
         sp<NotificationListener> listener,
         const camera_stream_buffer_t *outputBuffers, size_t numBuffers,
-        nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
-        SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing,
-        const SurfaceMap& outputSurfaces,
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool requested,
+        nsecs_t requestTimeNs, SessionStatsBuilder& sessionStatsBuilder,
+        bool timestampIncreasing, const SurfaceMap& outputSurfaces,
         const CaptureResultExtras &inResultExtras,
         ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
 
@@ -916,11 +916,11 @@
                 errorBufStrategy != ERROR_BUF_CACHE) {
             if (it != outputSurfaces.end()) {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing, it->second,
-                        inResultExtras.frameNumber, transform);
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
+                        it->second, inResultExtras.frameNumber, transform);
             } else {
                 res = stream->returnBuffer(
-                        outputBuffers[i], timestamp, timestampIncreasing,
+                        outputBuffers[i], timestamp, readoutTimestamp, timestampIncreasing,
                         std::vector<size_t> (), inResultExtras.frameNumber, transform);
             }
         }
@@ -951,7 +951,7 @@
             // cancel the buffer
             camera_stream_buffer_t sb = outputBuffers[i];
             sb.status = CAMERA_BUFFER_STATUS_ERROR;
-            stream->returnBuffer(sb, /*timestamp*/0,
+            stream->returnBuffer(sb, /*timestamp*/0, /*readoutTimestamp*/0,
                     timestampIncreasing, std::vector<size_t> (),
                     inResultExtras.frameNumber, transform);
 
@@ -973,8 +973,8 @@
     returnOutputBuffers(useHalBufManager, listener,
             request.pendingOutputBuffers.array(),
             request.pendingOutputBuffers.size(),
-            request.shutterTimestamp, /*requested*/true,
-            request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
+            request.shutterTimestamp, request.shutterReadoutTimestamp,
+            /*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
             request.outputSurfaces, request.resultExtras,
             request.errorBufStrategy, request.transform);
 
@@ -1035,6 +1035,7 @@
             }
 
             r.shutterTimestamp = msg.timestamp;
+            r.shutterReadoutTimestamp = msg.readout_timestamp;
             if (r.hasCallback) {
                 ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
                     states.cameraId.string(), __FUNCTION__,
@@ -1193,7 +1194,30 @@
 }
 
 void notify(CaptureOutputStates& states,
-        const hardware::camera::device::V3_2::NotifyMsg& msg) {
+        const hardware::camera::device::V3_8::NotifyMsg& msg) {
+    using android::hardware::camera::device::V3_2::MsgType;
+
+    hardware::camera::device::V3_2::NotifyMsg msg_3_2;
+    msg_3_2.type = msg.type;
+    bool hasReadoutTime = false;
+    uint64_t readoutTime = 0;
+    switch (msg.type) {
+        case MsgType::ERROR:
+            msg_3_2.msg.error = msg.msg.error;
+            break;
+        case MsgType::SHUTTER:
+            msg_3_2.msg.shutter = msg.msg.shutter.v3_2;
+            hasReadoutTime = true;
+            readoutTime = msg.msg.shutter.readoutTimestamp;
+            break;
+    }
+    notify(states, msg_3_2, hasReadoutTime, readoutTime);
+}
+
+void notify(CaptureOutputStates& states,
+        const hardware::camera::device::V3_2::NotifyMsg& msg,
+        bool hasReadoutTime, uint64_t readoutTime) {
+
     using android::hardware::camera::device::V3_2::MsgType;
     using android::hardware::camera::device::V3_2::ErrorCode;
 
@@ -1234,6 +1258,8 @@
             m.type = CAMERA_MSG_SHUTTER;
             m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
             m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+            m.message.shutter.readout_timestamp = hasReadoutTime ?
+                    readoutTime : m.message.shutter.timestamp;
             break;
     }
     notify(states, &m);
@@ -1416,7 +1442,8 @@
                 sb.status = CAMERA_BUFFER_STATUS_ERROR;
             }
             returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                    streamBuffers.data(), numAllocatedBuffers, 0, /*requested*/false,
+                    streamBuffers.data(), numAllocatedBuffers, /*timestamp*/0,
+                    /*readoutTimestamp*/0, /*requested*/false,
                     /*requestTimeNs*/0, states.sessionStatsBuilder);
             for (auto buf : newBuffers) {
                 states.bufferRecordsIntf.removeOneBufferCache(streamId, buf);
@@ -1477,8 +1504,8 @@
         }
         streamBuffer.stream = stream->asHalStream();
         returnOutputBuffers(states.useHalBufManager, /*listener*/nullptr,
-                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*requested*/false,
-                /*requestTimeNs*/0, states.sessionStatsBuilder);
+                &streamBuffer, /*size*/1, /*timestamp*/ 0, /*readoutTimestamp*/0,
+                /*requested*/false, /*requestTimeNs*/0, states.sessionStatsBuilder);
     }
 }
 
@@ -1491,9 +1518,10 @@
             returnOutputBuffers(
                 states.useHalBufManager, states.listener,
                 request.pendingOutputBuffers.array(),
-                request.pendingOutputBuffers.size(), 0, /*requested*/true,
-                request.requestTimeNs, states.sessionStatsBuilder, /*timestampIncreasing*/true,
-                request.outputSurfaces, request.resultExtras, request.errorBufStrategy);
+                request.pendingOutputBuffers.size(), /*timestamp*/0, /*readoutTimestamp*/0,
+                /*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
+                /*timestampIncreasing*/true, request.outputSurfaces, request.resultExtras,
+                request.errorBufStrategy);
             ALOGW("%s: Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d.\n", __FUNCTION__,
                     states.inflightMap.keyAt(idx), request.shutterTimestamp,
@@ -1565,7 +1593,7 @@
                 switch (halStream->stream_type) {
                     case CAMERA_STREAM_OUTPUT:
                         res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
-                                /*timestampIncreasing*/true,
+                                /*readoutTimestamp*/0, /*timestampIncreasing*/true,
                                 std::vector<size_t> (), frameNumber);
                         if (res != OK) {
                             ALOGE("%s: Can't return output buffer for frame %d to"
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 98fbab5..51899ee 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -26,6 +26,8 @@
 
 #include <common/CameraDeviceBase.h>
 
+#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+
 #include "device3/BufferUtils.h"
 #include "device3/DistortionMapper.h"
 #include "device3/ZoomRatioMapper.h"
@@ -52,7 +54,8 @@
             bool useHalBufManager,
             sp<NotificationListener> listener, // Only needed when outputSurfaces is not empty
             const camera_stream_buffer_t *outputBuffers,
-            size_t numBuffers, nsecs_t timestamp, bool requested, nsecs_t requestTimeNs,
+            size_t numBuffers, nsecs_t timestamp,
+            nsecs_t readoutTimestamp, bool requested, nsecs_t requestTimeNs,
             SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing = true,
             // The following arguments are only meant for surface sharing use case
             const SurfaceMap& outputSurfaces = SurfaceMap{},
@@ -119,7 +122,10 @@
 
     // Handle one notify message
     void notify(CaptureOutputStates& states,
-            const hardware::camera::device::V3_2::NotifyMsg& msg);
+            const hardware::camera::device::V3_2::NotifyMsg& msg,
+            bool hasReadoutTime = false, uint64_t readoutTime = 0LL);
+    void notify(CaptureOutputStates& states,
+            const hardware::camera::device::V3_8::NotifyMsg& msg);
 
     struct RequestBufferStates {
         const String8& cameraId;
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 15cf7f4..9e0c8f3 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -247,7 +247,7 @@
         return res;
     }
 
-    res = configureConsumerQueueLocked();
+    res = configureConsumerQueueLocked(false/*allowPreviewScheduler*/);
     if (res != OK) {
         ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
         return res;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index afcfd2a..1405fa1 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -557,7 +557,8 @@
     for (size_t i = 0; i < mPreparedBufferIdx; i++) {
         mPreparedBuffers.editItemAt(i).release_fence = -1;
         mPreparedBuffers.editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
-        returnBufferLocked(mPreparedBuffers[i], 0, /*transform*/ -1);
+        returnBufferLocked(mPreparedBuffers[i], /*timestamp*/0, /*readoutTimestamp*/0,
+                /*transform*/ -1);
     }
     mPreparedBuffers.clear();
     mPreparedBufferIdx = 0;
@@ -713,7 +714,7 @@
 }
 
 status_t Camera3Stream::returnBuffer(const camera_stream_buffer &buffer,
-        nsecs_t timestamp, bool timestampIncreasing,
+        nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
          const std::vector<size_t>& surface_ids, uint64_t frameNumber, int32_t transform) {
     ATRACE_HFR_CALL();
     Mutex::Autolock l(mLock);
@@ -743,7 +744,7 @@
      *
      * Do this for getBuffer as well.
      */
-    status_t res = returnBufferLocked(b, timestamp, transform, surface_ids);
+    status_t res = returnBufferLocked(b, timestamp, readoutTimestamp, transform, surface_ids);
     if (res == OK) {
         fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
     }
@@ -931,7 +932,7 @@
 }
 
 status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
-                                           nsecs_t, int32_t, const std::vector<size_t>&) {
+                                           nsecs_t, nsecs_t, int32_t, const std::vector<size_t>&) {
     ALOGE("%s: This type of stream does not support output", __FUNCTION__);
     return INVALID_OPERATION;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index fc75f79..17041de 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -352,7 +352,7 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     status_t         returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
             uint64_t frameNumber = 0, int32_t transform = -1);
 
@@ -517,7 +517,7 @@
     virtual status_t getBufferLocked(camera_stream_buffer *buffer,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
     virtual status_t returnBufferLocked(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, int32_t transform,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, int32_t transform,
             const std::vector<size_t>& surface_ids = std::vector<size_t>());
 
     virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 3aa5a3c..5f20f17 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -357,7 +357,7 @@
      * For bidirectional streams, this method applies to the output-side buffers
      */
     virtual status_t returnBuffer(const camera_stream_buffer &buffer,
-            nsecs_t timestamp, bool timestampIncreasing = true,
+            nsecs_t timestamp, nsecs_t readoutTimestamp, bool timestampIncreasing = true,
             const std::vector<size_t>& surface_ids = std::vector<size_t>(),
             uint64_t frameNumber = 0, int32_t transform = -1) = 0;
 
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index e752c8c..0c97f3e 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -65,6 +65,7 @@
 typedef struct camera_shutter_msg {
     uint32_t frame_number;
     uint64_t timestamp;
+    uint64_t readout_timestamp;
 } camera_shutter_msg_t;
 
 typedef struct camera_error_msg {
@@ -101,9 +102,10 @@
 } ERROR_BUF_STRATEGY;
 
 struct InFlightRequest {
-
     // Set by notify() SHUTTER call.
     nsecs_t shutterTimestamp;
+    // Set by notify() SHUTTER call with readout time.
+    nsecs_t shutterReadoutTimestamp;
     // Set by process_capture_result().
     nsecs_t sensorTimestamp;
     int     requestStatus;
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
new file mode 100644
index 0000000..1fbdb18
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-PreviewFrameScheduler"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <android/looper.h>
+#include "PreviewFrameScheduler.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Internal Choreographer thread implementation for polling and handling callbacks
+ */
+
+// Callback function for Choreographer
+static void frameCallback(const AChoreographerFrameCallbackData* callbackData, void* data) {
+    PreviewFrameScheduler* parent = static_cast<PreviewFrameScheduler*>(data);
+    if (parent == nullptr) {
+        ALOGE("%s: Invalid data for Choreographer callback!", __FUNCTION__);
+        return;
+    }
+
+    size_t length = AChoreographerFrameCallbackData_getFrameTimelinesLength(callbackData);
+    std::vector<nsecs_t> timeline(length);
+    for (size_t i = 0; i < length; i++) {
+        nsecs_t timestamp = AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentTime(
+                callbackData, i);
+        timeline[i] = timestamp;
+    }
+
+    parent->onNewPresentationTime(timeline);
+
+    AChoreographer_postExtendedFrameCallback(AChoreographer_getInstance(), frameCallback, data);
+}
+
+struct ChoreographerThread : public Thread {
+    ChoreographerThread();
+    status_t start(PreviewFrameScheduler* parent);
+    virtual status_t readyToRun() override;
+    virtual bool threadLoop() override;
+
+protected:
+    virtual ~ChoreographerThread() {}
+
+private:
+    ChoreographerThread &operator=(const ChoreographerThread &);
+
+    // This only impacts the shutdown time. It won't impact the choreographer
+    // callback frequency.
+    static constexpr nsecs_t kPollingTimeoutMs = 5;
+    PreviewFrameScheduler* mParent = nullptr;
+};
+
+ChoreographerThread::ChoreographerThread() : Thread(false /*canCallJava*/) {
+}
+
+status_t ChoreographerThread::start(PreviewFrameScheduler* parent) {
+    mParent = parent;
+    return run("PreviewChoreographer");
+}
+
+status_t ChoreographerThread::readyToRun() {
+    ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+    if (AChoreographer_getInstance() == NULL) {
+        return NO_INIT;
+    }
+
+    AChoreographer_postExtendedFrameCallback(
+            AChoreographer_getInstance(), frameCallback, mParent);
+    return OK;
+}
+
+bool ChoreographerThread::threadLoop() {
+    if (exitPending()) {
+        return false;
+    }
+    ALooper_pollOnce(kPollingTimeoutMs, nullptr, nullptr, nullptr);
+    return true;
+}
+
+/**
+ * PreviewFrameScheduler implementation
+ */
+
+PreviewFrameScheduler::PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer) :
+        mParent(parent),
+        mConsumer(consumer),
+        mChoreographerThread(new ChoreographerThread()) {
+}
+
+PreviewFrameScheduler::~PreviewFrameScheduler() {
+    {
+        Mutex::Autolock l(mLock);
+        mChoreographerThread->requestExit();
+    }
+    mChoreographerThread->join();
+}
+
+status_t PreviewFrameScheduler::queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+        ANativeWindowBuffer* anwBuffer, int releaseFence) {
+    // Start choreographer thread if it's not already running.
+    if (!mChoreographerThread->isRunning()) {
+        status_t res = mChoreographerThread->start(this);
+        if (res != OK) {
+            ALOGE("%s: Failed to init choreographer thread!", __FUNCTION__);
+            return res;
+        }
+    }
+
+    {
+        Mutex::Autolock l(mLock);
+        mPendingBuffers.emplace(timestamp, transform, anwBuffer, releaseFence);
+
+        // Queue buffer to client right away if pending buffers are more than
+        // the queue depth watermark.
+        if (mPendingBuffers.size() > kQueueDepthWatermark) {
+            auto oldBuffer = mPendingBuffers.front();
+            mPendingBuffers.pop();
+
+            status_t res = queueBufferToClientLocked(oldBuffer, oldBuffer.timestamp);
+            if (res != OK) {
+                return res;
+            }
+
+            // Reset the last capture and presentation time
+            mLastCameraCaptureTime = 0;
+            mLastCameraPresentTime = 0;
+        } else {
+            ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+        }
+    }
+    return OK;
+}
+
+void PreviewFrameScheduler::onNewPresentationTime(const std::vector<nsecs_t>& timeline) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    if (mPendingBuffers.size() > 0) {
+        auto nextBuffer = mPendingBuffers.front();
+        mPendingBuffers.pop();
+
+        // Find the best presentation time by finding the element in the
+        // choreographer timeline that's closest to the ideal presentation time.
+        // The ideal presentation time is the last presentation time + frame
+        // interval.
+        nsecs_t cameraInterval = nextBuffer.timestamp - mLastCameraCaptureTime;
+        nsecs_t idealPresentTime = (cameraInterval < kSpacingResetIntervalNs) ?
+                (mLastCameraPresentTime + cameraInterval) : nextBuffer.timestamp;
+        nsecs_t presentTime = *std::min_element(timeline.begin(), timeline.end(),
+                [idealPresentTime](nsecs_t p1, nsecs_t p2) {
+                        return std::abs(p1 - idealPresentTime) < std::abs(p2 - idealPresentTime);
+                });
+
+        status_t res = queueBufferToClientLocked(nextBuffer, presentTime);
+        ATRACE_INT(kPendingBufferTraceName, mPendingBuffers.size());
+
+        if (mParent.shouldLogError(res)) {
+            ALOGE("%s: Preview Stream: Error queueing buffer to native window:"
+                    " %s (%d)", __FUNCTION__, strerror(-res), res);
+        }
+
+        mLastCameraCaptureTime = nextBuffer.timestamp;
+        mLastCameraPresentTime = presentTime;
+    }
+}
+
+status_t PreviewFrameScheduler::queueBufferToClientLocked(
+        const BufferHolder& bufferHolder, nsecs_t timestamp) {
+    mParent.setTransform(bufferHolder.transform);
+
+    status_t res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+    if (res != OK) {
+        ALOGE("%s: Preview Stream: Error setting timestamp: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    res = mConsumer->queueBuffer(mConsumer.get(), bufferHolder.anwBuffer.get(),
+            bufferHolder.releaseFence);
+    if (res != OK) {
+        close(bufferHolder.releaseFence);
+    }
+
+    return res;
+}
+
+}; // namespace camera3
+
+}; // namespace android
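
onNewPresentationTime() above picks, for each cached buffer, the choreographer timeline entry closest to the ideal presentation time, where the ideal time is the previous presentation time advanced by the camera capture interval (falling back to the capture time after a long gap). A standalone sketch of that selection with toy numbers, using a plain vector in place of the AChoreographer callback data (choosePresentTime is a hypothetical name):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

using nsecs_t = int64_t;

// Pick the candidate present time closest to the ideal one. The ideal time is
// the previous present time advanced by the camera capture interval, unless the
// capture gap was too large, in which case we fall back to the capture time.
static nsecs_t choosePresentTime(const std::vector<nsecs_t>& timeline,
                                 nsecs_t captureTs, nsecs_t lastCaptureTs,
                                 nsecs_t lastPresentTs, nsecs_t resetIntervalNs) {
    nsecs_t cameraInterval = captureTs - lastCaptureTs;
    nsecs_t ideal = (cameraInterval < resetIntervalNs) ? lastPresentTs + cameraInterval
                                                       : captureTs;
    return *std::min_element(timeline.begin(), timeline.end(),
            [ideal](nsecs_t a, nsecs_t b) {
                return std::abs(a - ideal) < std::abs(b - ideal);
            });
}

int main() {
    // Choreographer offers three possible present times, ~16.6 ms apart.
    std::vector<nsecs_t> timeline = {100'000'000, 116'600'000, 133'300'000};
    // Previous frame presented at 83 ms; camera runs at 30 fps (33.3 ms interval).
    nsecs_t t = choosePresentTime(timeline, /*captureTs*/50'000'000,
                                  /*lastCaptureTs*/16'700'000,
                                  /*lastPresentTs*/83'000'000,
                                  /*resetIntervalNs*/1'000'000'000);
    std::printf("present at %lld ns\n", (long long)t);  // 116'600'000, closest to 83 + 33.3 ms
    return 0;
}

queuePreviewBuffer() above also bounds the cache: once more than kQueueDepthWatermark (2) buffers are pending, the oldest one is queued immediately with its own capture timestamp and the pacing state is reset.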
diff --git a/services/camera/libcameraservice/device3/PreviewFrameScheduler.h b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
new file mode 100644
index 0000000..c0574fd
--- /dev/null
+++ b/services/camera/libcameraservice/device3/PreviewFrameScheduler.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_PREVIEWFRAMESCHEDULER_H
+
+#include <queue>
+
+#include <android/choreographer.h>
+#include <gui/Surface.h>
+#include <gui/ISurfaceComposer.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/Looper.h>
+#include <utils/Thread.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3OutputStream;
+struct ChoreographerThread;
+
+/***
+ * Preview stream scheduler for better preview display synchronization
+ *
+ * The ideal viewfinder user experience is that frames are presented to the
+ * user in the same cadence as they are output by the camera sensor. However,
+ * the processing latency between frames can vary due to factors such as CPU
+ * load and differences in request settings. This variation in processing
+ * latency results in uneven presentation of frames to the user.
+ *
+ * The PreviewFrameScheduler improves the viewfinder user experience by:
+ * 1. Caching preview buffers in the scheduler.
+ * 2. For each choreographer callback, queueing the oldest cached buffer with
+ *    the best matching presentation timestamp. Frame N's presentation timestamp
+ *    is the choreographer timeline timestamp closest to (frame N-1's
+ *    presentation time + camera capture interval between frame N-1 and frame N).
+ * 3. Maintaining at most 2 queue-able buffers. If a 3rd preview buffer becomes
+ *    available, queueing the oldest cached buffer to the buffer queue right away.
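+ *    For example (illustrative): with 2 buffers already cached, an incoming
+ *    3rd buffer causes the oldest cached buffer to be queued immediately with
+ *    its original camera timestamp, without waiting for the next choreographer
+ *    callback.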
+ */
+class PreviewFrameScheduler {
+  public:
+    explicit PreviewFrameScheduler(Camera3OutputStream& parent, sp<Surface> consumer);
+    virtual ~PreviewFrameScheduler();
+
+    // Queue preview buffer locally
+    status_t queuePreviewBuffer(nsecs_t timestamp, int32_t transform,
+            ANativeWindowBuffer* anwBuffer, int releaseFence);
+
+    // Callback function with a new presentation timeline from the choreographer.
+    // This triggers a locally queued buffer to be sent to the buffer queue.
+    void onNewPresentationTime(const std::vector<nsecs_t>& presentationTimeline);
+
+    // Maintain at most 2 queue-able buffers
+    static constexpr int32_t kQueueDepthWatermark = 2;
+
+  private:
+    // structure holding cached preview buffer info
+    struct BufferHolder {
+        nsecs_t timestamp;
+        int32_t transform;
+        sp<ANativeWindowBuffer> anwBuffer;
+        int releaseFence;
+
+        BufferHolder(nsecs_t t, int32_t tr, ANativeWindowBuffer* anwb, int rf) :
+                timestamp(t), transform(tr), anwBuffer(anwb), releaseFence(rf) {}
+    };
+
+    status_t queueBufferToClientLocked(const BufferHolder& bufferHolder,
+            nsecs_t presentTime);
+
+    static constexpr char kPendingBufferTraceName[] = "pending_preview_buffers";
+
+    // Camera capture interval for resetting frame spacing between preview sessions
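+    // If consecutive captures are further apart than this, the ideal presentation
+    // time falls back to the frame's own camera timestamp rather than being
+    // extrapolated from the previous presentation time.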
+    static constexpr nsecs_t kSpacingResetIntervalNs = 1000000000L; // 1 second
+
+    Camera3OutputStream& mParent;
+    sp<ANativeWindow> mConsumer;
+    mutable Mutex mLock;
+
+    std::queue<BufferHolder> mPendingBuffers;
+    nsecs_t mLastCameraCaptureTime = 0;
+    nsecs_t mLastCameraPresentTime = 0;
+
+    // Choreographer related
+    sp<Looper> mLooper;
+    sp<ChoreographerThread> mChoreographerThread;
+};
+
+}; // namespace camera3
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 0b5ad79..8a67b0a 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -19,12 +19,14 @@
 
 LOCAL_SHARED_LIBRARIES := \
     libbase \
+    libbinder \
     libcutils \
     libcameraservice \
     libhidlbase \
     liblog \
     libcamera_client \
     libcamera_metadata \
+    libgui \
     libui \
     libutils \
     libjpeg \
diff --git a/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
new file mode 100644
index 0000000..025521a
--- /dev/null
+++ b/services/camera/libcameraservice/tests/PreviewSchedulerTest.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PreviewSchedulerTest"
+
+#include <chrono>
+#include <thread>
+#include <utility>
+
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+
+#include "../device3/Camera3OutputStream.h"
+#include "../device3/PreviewFrameScheduler.h"
+
+using namespace android;
+using namespace android::camera3;
+
+// Consumer buffer available listener
+class SimpleListener : public BufferItemConsumer::FrameAvailableListener {
+public:
+    SimpleListener(size_t frameCount): mFrameCount(frameCount) {}
+
+    void waitForFrames() {
+        Mutex::Autolock lock(mMutex);
+        while (mFrameCount > 0) {
+            mCondition.wait(mMutex);
+        }
+    }
+
+    void onFrameAvailable(const BufferItem& /*item*/) override {
+        Mutex::Autolock lock(mMutex);
+        if (mFrameCount > 0) {
+            mFrameCount--;
+            mCondition.signal();
+        }
+    }
+
+    void reset(size_t frameCount) {
+        Mutex::Autolock lock(mMutex);
+        mFrameCount = frameCount;
+    }
+private:
+    size_t mFrameCount;
+    Mutex mMutex;
+    Condition mCondition;
+};
+
+// Test the PreviewFrameScheduler functionality of re-timing buffers
+TEST(PreviewSchedulerTest, BasicPreviewSchedulerTest) {
+    const int ID = 0;
+    const int FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    const uint32_t WIDTH = 640;
+    const uint32_t HEIGHT = 480;
+    const int32_t TRANSFORM = 0;
+    const nsecs_t T_OFFSET = 0;
+    const android_dataspace DATASPACE = HAL_DATASPACE_UNKNOWN;
+    const camera_stream_rotation_t ROTATION = CAMERA_STREAM_ROTATION_0;
+    const String8 PHY_ID;
+    const std::unordered_set<int32_t> PIX_MODES;
+    const int BUFFER_COUNT = 4;
+    const int TOTAL_BUFFER_COUNT = BUFFER_COUNT * 2;
+
+    // Create buffer queue
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    ASSERT_NE(producer, nullptr);
+    ASSERT_NE(consumer, nullptr);
+    ASSERT_EQ(NO_ERROR, consumer->setDefaultBufferSize(WIDTH, HEIGHT));
+
+    // Set up consumer
+    sp<BufferItemConsumer> bufferConsumer = new BufferItemConsumer(consumer,
+            GRALLOC_USAGE_HW_COMPOSER, BUFFER_COUNT);
+    ASSERT_NE(bufferConsumer, nullptr);
+    sp<SimpleListener> consumerListener = new SimpleListener(BUFFER_COUNT);
+    bufferConsumer->setFrameAvailableListener(consumerListener);
+
+    // Set up producer
+    sp<Surface> surface = new Surface(producer);
+    sp<StubProducerListener> listener = new StubProducerListener();
+    ASSERT_EQ(NO_ERROR, surface->connect(NATIVE_WINDOW_API_CPU, listener));
+    sp<ANativeWindow> anw(surface);
+    ASSERT_EQ(NO_ERROR, native_window_set_buffer_count(anw.get(), TOTAL_BUFFER_COUNT));
+
+    // Create Camera3OutputStream and PreviewFrameScheduler
+    sp<Camera3OutputStream> stream = new Camera3OutputStream(ID, surface, WIDTH, HEIGHT,
+            FORMAT, DATASPACE, ROTATION, T_OFFSET, PHY_ID, PIX_MODES);
+    ASSERT_NE(stream, nullptr);
+    std::unique_ptr<PreviewFrameScheduler> scheduler =
+            std::make_unique<PreviewFrameScheduler>(*stream, surface);
+    ASSERT_NE(scheduler, nullptr);
+
+    // Each entry is a pair of nsecs_t: the camera timestamp delta (negative means
+    // in the past) and the frame interval.
+    const std::pair<nsecs_t, nsecs_t> inputTimestamps[][BUFFER_COUNT] = {
+        // 30fps, 33ms interval
+        {{-100000000LL, 33333333LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 33333333LL}, {0, 0}},
+        // 30fps, variable interval
+        {{-100000000LL, 16666667LL}, {-66666667LL, 33333333LL},
+          {-33333333LL, 50000000LL}, {0, 0}},
+        // 60fps, 16.7ms interval
+        {{-50000000LL, 16666667LL}, {-33333333LL, 16666667LL},
+          {-16666667LL, 16666667LL}, {0, 0}},
+        // 60fps, variable interval
+        {{-50000000LL, 8666667LL}, {-33333333LL, 19666667LL},
+          {-16666667LL, 20666667LL}, {0, 0}},
+    };
+
+    // Go through the different use cases and check the buffer timestamps
+    size_t iterations = sizeof(inputTimestamps)/sizeof(inputTimestamps[0]);
+    for (size_t i = 0; i < iterations; i++) {
+        // Space out different test sets to reset the frame scheduler
+        nsecs_t timeBase = systemTime() - s2ns(1) * (iterations - i);
+        nsecs_t lastQueueTime = 0;
+        nsecs_t duration = 0;
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            ANativeWindowBuffer* buffer = nullptr;
+            int fenceFd;
+            ASSERT_EQ(NO_ERROR, anw->dequeueBuffer(anw.get(), &buffer, &fenceFd));
+
+            // Sleep to space out queuePreviewBuffer
+            nsecs_t currentTime = systemTime();
+            if (duration > 0 && duration > currentTime - lastQueueTime) {
+                std::this_thread::sleep_for(
+                        std::chrono::nanoseconds(duration + lastQueueTime - currentTime));
+            }
+            nsecs_t timestamp = timeBase + inputTimestamps[i][j].first;
+            ASSERT_EQ(NO_ERROR,
+                    scheduler->queuePreviewBuffer(timestamp, TRANSFORM, buffer, fenceFd));
+
+            lastQueueTime = systemTime();
+            duration = inputTimestamps[i][j].second;
+        }
+
+        // Collect output timestamps, making sure each one was set either by the
+        // producer or by the scheduler, and is no earlier than its capture time.
+        consumerListener->waitForFrames();
+        nsecs_t outputTimestamps[BUFFER_COUNT];
+        for (size_t j = 0; j < BUFFER_COUNT; j++) {
+            BufferItem bufferItem;
+            ASSERT_EQ(NO_ERROR, bufferConsumer->acquireBuffer(&bufferItem, 0/*presentWhen*/));
+
+            outputTimestamps[j] = bufferItem.mTimestamp;
+            ALOGV("%s: [%zu][%zu]: input: %" PRId64 ", output: %" PRId64, __FUNCTION__,
+                  i, j, timeBase + inputTimestamps[i][j].first, bufferItem.mTimestamp);
+            ASSERT_GE(bufferItem.mTimestamp, timeBase + inputTimestamps[i][j].first);
+
+            ASSERT_EQ(NO_ERROR, bufferConsumer->releaseBuffer(bufferItem));
+        }
+
+        // Check the output timestamp intervals are aligned with input intervals
+        const nsecs_t SHIFT_THRESHOLD = ms2ns(2);
+        for (size_t j = 0; j < BUFFER_COUNT - 1; j++) {
+            nsecs_t interval_shift = outputTimestamps[j+1] - outputTimestamps[j] -
+                    (inputTimestamps[i][j+1].first - inputTimestamps[i][j].first);
+            ASSERT_LE(std::abs(interval_shift), SHIFT_THRESHOLD);
+        }
+
+        consumerListener->reset(BUFFER_COUNT);
+    }
+
+    // Disconnect the surface
+    ASSERT_EQ(NO_ERROR, surface->disconnect(NATIVE_WINDOW_API_CPU));
+}
diff --git a/services/camera/libcameraservice/tests/how_to_run.txt b/services/camera/libcameraservice/tests/how_to_run.txt
new file mode 100644
index 0000000..93239e3
--- /dev/null
+++ b/services/camera/libcameraservice/tests/how_to_run.txt
@@ -0,0 +1,5 @@
+adb root &&
+m cameraservice_test &&
+adb push $ANDROID_PRODUCT_OUT/data/nativetest/cameraservice_test/cameraservice_test \
+    /data/nativetest/cameraservice_test/arm64/cameraservice_test &&
+adb shell /data/nativetest/cameraservice_test/arm64/cameraservice_test
\ No newline at end of file
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 192e241..1053327 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -21,7 +21,7 @@
 #include <camera/camera2/OutputConfiguration.h>
 #include <camera/camera2/SessionConfiguration.h>
 #include <camera/camera2/SubmitInfo.h>
-#include <android/hardware/camera/device/3.7/types.h>
+#include <android/hardware/camera/device/3.8/types.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
 #include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
 
diff --git a/services/mediaextractor/TEST_MAPPING b/services/mediaextractor/TEST_MAPPING
new file mode 100644
index 0000000..7a66eeb
--- /dev/null
+++ b/services/mediaextractor/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+    "presubmit": [
+        {
+            "name": "CtsMediaTranscodingTestCases"
+        }
+    ]
+}
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 69ec947..cd6af9f 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -1225,3 +1225,29 @@
         ASSERT_EQ(id, validateId.validateId(id));
     }
 }
+
+TEST(mediametrics_tests, ErrorConversion) {
+    constexpr status_t errors[] = {
+        NO_ERROR,
+        BAD_VALUE,
+        DEAD_OBJECT,
+        NO_MEMORY,
+        PERMISSION_DENIED,
+        INVALID_OPERATION,
+        WOULD_BLOCK,
+        UNKNOWN_ERROR,
+    };
+
+    auto roundTrip = [](status_t status) {
+        return android::mediametrics::errorStringToStatus(
+                android::mediametrics::statusToErrorString(status));
+    };
+
+    // Primary status error categories.
+    for (const auto error : errors) {
+        ASSERT_EQ(error, roundTrip(error));
+    }
+
+    // Status errors that are treated specially by the conversion.
+    ASSERT_EQ(DEAD_OBJECT, roundTrip(FAILED_TRANSACTION));
+}
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 20a4c34..80e4296 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -70,6 +70,7 @@
         "framework-permission-aidl-cpp",
         "libaudioclient_aidl_conversion",
         "packagemanager_aidl-cpp",
+        "android.media.audio.common.types-V1-cpp",
     ],
 
     export_shared_lib_headers: [
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index cb903f2..fb5bfa3 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -346,7 +346,7 @@
     return res;
 }
 
-::ndk::ScopedAStatus TunerFilter::createSharedFilter(string* _aidl_return) {
+::ndk::ScopedAStatus TunerFilter::acquireSharedFilterToken(string* _aidl_return) {
     Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
@@ -369,7 +369,7 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
-::ndk::ScopedAStatus TunerFilter::releaseSharedFilter(const string& /* in_filterToken */) {
+::ndk::ScopedAStatus TunerFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
     Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
@@ -405,6 +405,17 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
+::ndk::ScopedAStatus TunerFilter::setDelayHint(const FilterDelayHint& in_hint) {
+    Mutex::Autolock _l(mLock);
+    if (mFilter == nullptr) {
+        ALOGE("IFilter is not initialized");
+        return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+    }
+
+    return mFilter->setDelayHint(in_hint);
+}
+
 bool TunerFilter::isSharedFilterAllowed(int callingPid) {
     return mShared && mClientPid != callingPid;
 }
@@ -453,8 +464,10 @@
 
 void TunerFilter::FilterCallback::detachSharedFilterCallback() {
     Mutex::Autolock _l(mCallbackLock);
-    mTunerFilterCallback = mOriginalCallback;
-    mOriginalCallback = nullptr;
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
+    }
 }
 
 }  // namespace tuner
diff --git a/services/tuner/TunerFilter.h b/services/tuner/TunerFilter.h
index c6b41d3..529c191 100644
--- a/services/tuner/TunerFilter.h
+++ b/services/tuner/TunerFilter.h
@@ -23,6 +23,7 @@
 #include <aidl/android/hardware/tv/tuner/DemuxFilterSettings.h>
 #include <aidl/android/hardware/tv/tuner/DemuxFilterStatus.h>
 #include <aidl/android/hardware/tv/tuner/DemuxFilterType.h>
+#include <aidl/android/hardware/tv/tuner/FilterDelayHint.h>
 #include <aidl/android/hardware/tv/tuner/IFilter.h>
 #include <aidl/android/media/tv/tuner/BnTunerFilter.h>
 #include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
@@ -37,6 +38,7 @@
 using ::aidl::android::hardware::tv::tuner::DemuxFilterSettings;
 using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
 using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
 using ::aidl::android::hardware::tv::tuner::IFilter;
 using ::aidl::android::media::tv::tuner::BnTunerFilter;
 using ::android::Mutex;
@@ -91,9 +93,10 @@
     ::ndk::ScopedAStatus stop() override;
     ::ndk::ScopedAStatus flush() override;
     ::ndk::ScopedAStatus close() override;
-    ::ndk::ScopedAStatus createSharedFilter(string* _aidl_return) override;
-    ::ndk::ScopedAStatus releaseSharedFilter(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
     ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
 
     bool isSharedFilterAllowed(int32_t pid);
     void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
index 0450a57..dc40f03 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
@@ -23,6 +23,7 @@
 import android.hardware.tv.tuner.DemuxFilterType;
 import android.hardware.tv.tuner.AvStreamType;
 import android.hardware.tv.tuner.DemuxFilterMonitorEventType;
+import android.hardware.tv.tuner.FilterDelayHint;
 
 /**
  * Tuner Filter interface handles tuner related operations.
@@ -101,19 +102,19 @@
     void close();
 
     /**
-     * Create a new SharedFilter instance.
+     * Acquire a new SharedFilter token.
      *
      * @return a token of the newly created SharedFilter instance.
      */
-    String createSharedFilter();
+    String acquireSharedFilterToken();
 
     /**
-     * Release a SharedFilter instance.
+     * Free a SharedFilter token.
      *
-     * @param filterToken the SharedFilter will be released.
+     * @param filterToken the SharedFilter token to be released.
      * @return a token of the newly created SharedFilter instance.
      */
-    void releaseSharedFilter(in String filterToken);
+    void freeSharedFilterToken(in String filterToken);
 
     /**
      * Get filter type.
@@ -121,4 +122,6 @@
      * @return filter type.
      */
     DemuxFilterType getFilterType();
+
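+    /**
+     * Set a delay hint on this filter.
+     *
+     * @param hint the delay hint to set.
+     */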
+    void setDelayHint(in FilterDelayHint hint);
 }
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index dddd025..7b76093 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -19,6 +19,7 @@
 #include "TunerHidlFilter.h"
 
 #include <aidl/android/hardware/tv/tuner/Constant.h>
+#include <aidl/android/hardware/tv/tuner/DemuxScIndex.h>
 #include <aidl/android/hardware/tv/tuner/Result.h>
 #include <aidlcommonsupport/NativeHandle.h>
 #include <binder/IPCThreadState.h>
@@ -55,6 +56,7 @@
 using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterSettingsFilterSettings;
 using ::aidl::android::hardware::tv::tuner::DemuxMmtpFilterType;
 using ::aidl::android::hardware::tv::tuner::DemuxPid;
+using ::aidl::android::hardware::tv::tuner::DemuxScIndex;
 using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettings;
 using ::aidl::android::hardware::tv::tuner::DemuxTlvFilterSettingsFilterSettings;
 using ::aidl::android::hardware::tv::tuner::DemuxTsFilterSettings;
@@ -521,7 +523,7 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
-::ndk::ScopedAStatus TunerHidlFilter::createSharedFilter(string* _aidl_return) {
+::ndk::ScopedAStatus TunerHidlFilter::acquireSharedFilterToken(string* _aidl_return) {
     Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
@@ -545,7 +547,7 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
-::ndk::ScopedAStatus TunerHidlFilter::releaseSharedFilter(const string& /* in_filterToken */) {
+::ndk::ScopedAStatus TunerHidlFilter::freeSharedFilterToken(const string& /* in_filterToken */) {
     Mutex::Autolock _l(mLock);
     if (mFilter == nullptr) {
         ALOGE("IFilter is not initialized");
@@ -580,6 +582,12 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
+::ndk::ScopedAStatus TunerHidlFilter::setDelayHint(const FilterDelayHint&) {
+    // setDelayHint is not supported in HIDL HAL
+    return ::ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(Result::UNAVAILABLE));
+}
+
 bool TunerHidlFilter::isSharedFilterAllowed(int callingPid) {
     return mShared && mClientPid != callingPid;
 }
@@ -866,16 +874,26 @@
         const DemuxFilterRecordSettings& settings) {
     HidlDemuxFilterRecordSettings record{
             .tsIndexMask = static_cast<uint32_t>(settings.tsIndexMask),
-            .scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType),
     };
 
     switch (settings.scIndexMask.getTag()) {
     case DemuxFilterScIndexMask::scIndex: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
         record.scIndexMask.sc(
                 static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scIndex>()));
         break;
     }
+    case DemuxFilterScIndexMask::scAvc: {
+        record.scIndexType = HidlDemuxRecordScIndexType::SC;
+        uint32_t index =
+                static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scAvc>());
+        // AVC SC indices start at 1 << 4 in the HIDL HAL and at 1 << 0 in AIDL.
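+        // e.g. an AIDL scAvc bit of 1 << 0 maps to a HIDL sc bit of 1 << 4.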
+        index = index << 4;
+        record.scIndexMask.sc(index);
+        break;
+    }
     case DemuxFilterScIndexMask::scHevc: {
+        record.scIndexType = static_cast<HidlDemuxRecordScIndexType>(settings.scIndexType);
         record.scIndexMask.scHevc(
                 static_cast<uint32_t>(settings.scIndexMask.get<DemuxFilterScIndexMask::scHevc>()));
         break;
@@ -941,8 +959,10 @@
 
 void TunerHidlFilter::FilterCallback::detachSharedFilterCallback() {
     Mutex::Autolock _l(mCallbackLock);
-    mTunerFilterCallback = mOriginalCallback;
-    mOriginalCallback = nullptr;
+    if (mTunerFilterCallback != nullptr && mOriginalCallback != nullptr) {
+        mTunerFilterCallback = mOriginalCallback;
+        mOriginalCallback = nullptr;
+    }
 }
 
 /////////////// FilterCallback Helper Methods ///////////////////////
@@ -1092,8 +1112,13 @@
         DemuxFilterScIndexMask scIndexMask;
         if (tsRecordEvent.scIndexMask.getDiscriminator() ==
             HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
-            scIndexMask.set<DemuxFilterScIndexMask::scIndex>(
-                    static_cast<int32_t>(tsRecordEvent.scIndexMask.sc()));
+            int32_t hidlScIndex = static_cast<int32_t>(tsRecordEvent.scIndexMask.sc());
+            if (hidlScIndex <= static_cast<int32_t>(DemuxScIndex::SEQUENCE)) {
+                scIndexMask.set<DemuxFilterScIndexMask::scIndex>(hidlScIndex);
+            } else {
+                // AVC SC indices start at 1 << 4 in the HIDL HAL and at 1 << 0 in AIDL.
+                scIndexMask.set<DemuxFilterScIndexMask::scAvc>(hidlScIndex >> 4);
+            }
         } else if (tsRecordEvent.scIndexMask.getDiscriminator() ==
                    HidlDemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
             scIndexMask.set<DemuxFilterScIndexMask::scHevc>(
diff --git a/services/tuner/hidl/TunerHidlFilter.h b/services/tuner/hidl/TunerHidlFilter.h
index 548b753..b8fad22 100644
--- a/services/tuner/hidl/TunerHidlFilter.h
+++ b/services/tuner/hidl/TunerHidlFilter.h
@@ -52,6 +52,7 @@
 using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
 using ::aidl::android::hardware::tv::tuner::DemuxFilterType;
 using ::aidl::android::hardware::tv::tuner::DemuxIpAddressIpAddress;
+using ::aidl::android::hardware::tv::tuner::FilterDelayHint;
 using ::aidl::android::media::tv::tuner::BnTunerFilter;
 using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
 using ::android::Mutex;
@@ -182,9 +183,10 @@
     ::ndk::ScopedAStatus stop() override;
     ::ndk::ScopedAStatus flush() override;
     ::ndk::ScopedAStatus close() override;
-    ::ndk::ScopedAStatus createSharedFilter(string* _aidl_return) override;
-    ::ndk::ScopedAStatus releaseSharedFilter(const string& in_filterToken) override;
+    ::ndk::ScopedAStatus acquireSharedFilterToken(string* _aidl_return) override;
+    ::ndk::ScopedAStatus freeSharedFilterToken(const string& in_filterToken) override;
     ::ndk::ScopedAStatus getFilterType(DemuxFilterType* _aidl_return) override;
+    ::ndk::ScopedAStatus setDelayHint(const FilterDelayHint& in_hint) override;
 
     bool isSharedFilterAllowed(int32_t pid);
     void attachSharedFilterCallback(const shared_ptr<ITunerFilterCallback>& in_cb);
diff --git a/services/tuner/mediatuner.rc b/services/tuner/mediatuner.rc
index fd30618..6a3e199 100644
--- a/services/tuner/mediatuner.rc
+++ b/services/tuner/mediatuner.rc
@@ -2,4 +2,7 @@
     class main
     group media
     ioprio rt 4
+    onrestart restart vendor.tuner-hal-1-0
+    onrestart restart vendor.tuner-hal-1-1
+    onrestart restart vendor.tuner-default
     task_profiles ProcessCapacityHigh HighPerformance