Merge "libaudiohal: Handle pause-flush-resume for offloaded streams" am: e7c2005f99 am: e083c73642

Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/2455445

Change-Id: I892ea17c0d15212ec1af25c85ade16f5956b57e2
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 23d90cc..02047ae 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -696,7 +696,7 @@
     CameraMetadata rawMetadata;
     int targetSdkVersion = android_get_application_target_sdk_version();
     binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr),
-            targetSdkVersion, /*overrideToPortrait*/true, &rawMetadata);
+            targetSdkVersion, /*overrideToPortrait*/false, &rawMetadata);
     if (!serviceRet.isOk()) {
         switch(serviceRet.serviceSpecificErrorCode()) {
             case hardware::ICameraService::ERROR_DISCONNECTED:
@@ -748,7 +748,7 @@
     binder::Status serviceRet = cs->connectDevice(
             callbacks, String16(cameraId), String16(""), {},
             hardware::ICameraService::USE_CALLING_UID, /*oomScoreOffset*/0,
-            targetSdkVersion, /*overrideToPortrait*/true, /*out*/&deviceRemote);
+            targetSdkVersion, /*overrideToPortrait*/false, /*out*/&deviceRemote);
 
     if (!serviceRet.isOk()) {
         ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index d4025e5..78ea2a1 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -42,9 +42,15 @@
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
         mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
-        mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
-        mSize(hasData ? (bpp * width * height) : 0),
-        mIccSize(iccSize), mBitDepth(bitDepth) {
+        mRotationAngle(angle), mBytesPerPixel(bpp), mIccSize(iccSize),
+        mBitDepth(bitDepth) {
+            uint32_t multVal;
+            mRowBytes = __builtin_mul_overflow(bpp, width, &multVal) ? 0 : multVal;
+            mSize = __builtin_mul_overflow(multVal, height, &multVal) ? 0 : multVal;
+            if (hasData && (mRowBytes == 0 || mSize == 0)) {
+                ALOGE("Frame rowBytes/size overflow %dx%d bpp %d", width, height, bpp);
+                android_errorWriteLog(0x534e4554, "233006499");
+            }
     }
 
     void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
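
For reference, a minimal standalone sketch of the overflow-guard pattern used in the VideoFrame change above: __builtin_mul_overflow reports whether the product fits, and the patch treats an overflow as a zero-sized frame. The helper name below is illustrative only, not part of the patch.

    #include <cstdint>
    #include <cstdio>

    // Returns a * b, or 0 if the product does not fit in 32 bits.
    static uint32_t checkedMul(uint32_t a, uint32_t b) {
        uint32_t out;
        return __builtin_mul_overflow(a, b, &out) ? 0 : out;
    }

    int main() {
        printf("%u\n", checkedMul(4, 1920));           // 7680: row bytes for a 1920-wide, 4 bpp frame
        printf("%u\n", checkedMul(0x10000, 0x10000));  // overflows uint32_t -> 0
        return 0;
    }
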
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index f64aedf..9636949 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -21,6 +21,7 @@
       "ScreenHeadFusion.cpp",
       "StillnessDetector.cpp",
       "Twist.cpp",
+      "VectorRecorder.cpp",
     ],
     shared_libs: [
         "libaudioutils",
@@ -35,6 +36,9 @@
     export_header_lib_headers: [
         "libeigen",
     ],
+    cflags: [
+        "-Wthread-safety",
+    ],
 }
 
 cc_library {
@@ -84,6 +88,7 @@
     ],
     shared_libs: [
         "libaudioutils",
+        "libbase", // StringAppendF
         "libheadtracking",
     ],
 }
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
index 299192f..b9dd0b8 100644
--- a/media/libheadtracking/HeadTrackingProcessor-test.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -15,10 +15,10 @@
  */
 
 #include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
 
 #include <gtest/gtest.h>
 
-#include "QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 101b825..9db4afa 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -18,10 +18,10 @@
 #include <android-base/stringprintf.h>
 #include <audio_utils/SimpleLog.h>
 #include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
 
 #include "ModeSelector.h"
 #include "PoseBias.h"
-#include "QuaternionUtil.h"
 #include "ScreenHeadFusion.h"
 #include "StillnessDetector.h"
 
@@ -97,7 +97,7 @@
             mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
             // Whenever the screen is unstable, recenter the head pose.
             if (!screenStable) {
-                recenter(true, false);
+                recenter(true, false, "calculate: screen movement");
             }
             mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
                                                    worldToLogicalScreen);
@@ -109,7 +109,7 @@
             // Auto-recenter.
             bool headStable = mHeadStillnessDetector.calculate(timestamp);
             if (headStable || !screenStable) {
-                recenter(true, false);
+                recenter(true, false, "calculate: head movement");
                 worldToHead = mHeadPoseBias.getOutput();
             }
 
@@ -139,16 +139,16 @@
 
     HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
 
-    void recenter(bool recenterHead, bool recenterScreen) override {
+    void recenter(bool recenterHead, bool recenterScreen, std::string source) override {
         if (recenterHead) {
             mHeadPoseBias.recenter();
             mHeadStillnessDetector.reset();
-            mLocalLog.log("recenter Head");
+            mLocalLog.log("recenter Head from %s", source.c_str());
         }
         if (recenterScreen) {
             mScreenPoseBias.recenter();
             mScreenStillnessDetector.reset();
-            mLocalLog.log("recenter Screen");
+            mLocalLog.log("recenter Screen from %s", source.c_str());
         }
 
         // If a sensor being recentered is included in the current mode, apply rate limiting to
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
index a136e6b..6925908 100644
--- a/media/libheadtracking/ModeSelector-test.cpp
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -18,7 +18,7 @@
 
 #include <gtest/gtest.h>
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
index a9e18ce..29dba29 100644
--- a/media/libheadtracking/Pose-test.cpp
+++ b/media/libheadtracking/Pose-test.cpp
@@ -18,7 +18,7 @@
 
 #include <gtest/gtest.h>
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 using android::media::Pose3f;
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 4a4b56a..e03725b 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -16,8 +16,8 @@
 #include <android-base/stringprintf.h>
 
 #include "media/Pose.h"
+#include "media/QuaternionUtil.h"
 #include "media/Twist.h"
-#include "QuaternionUtil.h"
 
 namespace android {
 namespace media {
diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp
index 9f42a2c..659dda0 100644
--- a/media/libheadtracking/PoseBias-test.cpp
+++ b/media/libheadtracking/PoseBias-test.cpp
@@ -17,7 +17,8 @@
 #include <gtest/gtest.h>
 
 #include "PoseBias.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
index df0a05f..521e3eb 100644
--- a/media/libheadtracking/PoseDriftCompensator-test.cpp
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -18,7 +18,8 @@
 #include <cmath>
 
 #include "PoseDriftCompensator.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
index 0e90cad..2775790 100644
--- a/media/libheadtracking/PoseDriftCompensator.cpp
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -18,7 +18,7 @@
 
 #include <cmath>
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 
 namespace android {
 namespace media {
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
index f306183..ded874a 100644
--- a/media/libheadtracking/PoseRateLimiter-test.cpp
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -17,7 +17,8 @@
 #include <gtest/gtest.h>
 
 #include "PoseRateLimiter.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
index e79e54a..cfeca00 100644
--- a/media/libheadtracking/QuaternionUtil-test.cpp
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -16,7 +16,7 @@
 
 #include <gtest/gtest.h>
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 using Eigen::Quaternionf;
@@ -51,6 +51,92 @@
     EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
 }
 
+// Float precision limits the test tolerance to this value (1e-4f fails)
+constexpr float NEAR = 1e-3f;
+
+TEST(QuaternionUtil, quaternionToAngles_basic) {
+    float pitch, roll, yaw;
+
+    // Verify the reported angles match the input rotation.
+    // Choose 11 angles between -M_PI / 2 and M_PI / 2.
+    for (int step = -5; step <= 5; ++step) {
+        const float angle = M_PI * step * 0.1f;
+
+        quaternionToAngles(rotationVectorToQuaternion({angle, 0.f, 0.f}), &pitch, &roll, &yaw);
+        EXPECT_NEAR(angle, pitch, NEAR);
+        EXPECT_NEAR(0.f, roll, NEAR);
+        EXPECT_NEAR(0.f, yaw, NEAR);
+
+        quaternionToAngles(rotationVectorToQuaternion({0.f, angle, 0.f}), &pitch, &roll, &yaw);
+        EXPECT_NEAR(0.f, pitch, NEAR);
+        EXPECT_NEAR(angle, roll, NEAR);
+        EXPECT_NEAR(0.f, yaw, NEAR);
+
+        quaternionToAngles(rotationVectorToQuaternion({0.f, 0.f, angle}), &pitch, &roll, &yaw);
+        EXPECT_NEAR(0.f, pitch, NEAR);
+        EXPECT_NEAR(0.f, roll, NEAR);
+        EXPECT_NEAR(angle, yaw, NEAR);
+    }
+
+    // Generates a debug string
+    const std::string s = quaternionToAngles<true /* DEBUG */>(
+            rotationVectorToQuaternion({M_PI, 0.f, 0.f}), &pitch, &roll, &yaw);
+    ASSERT_FALSE(s.empty());
+}
+
+TEST(QuaternionUtil, quaternionToAngles_zaxis) {
+    float pitch, roll, yaw;
+
+    for (int rot_step = -10; rot_step <= 10; ++rot_step) {
+        const float rot_angle = M_PI * rot_step * 0.1f;
+        // pitch independent of world Z rotation
+
+        // We don't test the boundaries of pitch +-M_PI/2 as roll can become
+        // degenerate and atan(0, 0) may report 0, PI, or -PI.
+        for (int step = -4; step <= 4; ++step) {
+            const float angle = M_PI * step * 0.1f;
+            auto q = rotationVectorToQuaternion({angle, 0.f, 0.f});
+            auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+            // Sequential active rotations (on world frame) compose as R_2 * R_1.
+            quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+            EXPECT_NEAR(angle, pitch, NEAR);
+            EXPECT_NEAR(0.f, roll, NEAR);
+        }
+
+        // roll independent of world Z rotation
+        for (int step = -5; step <= 5; ++step) {
+            const float angle = M_PI * step * 0.1f;
+            auto q = rotationVectorToQuaternion({0.f, angle, 0.f});
+            auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+            // Sequential active rotations (on world frame) compose as R_2 * R_1.
+            quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+            EXPECT_NEAR(0.f, pitch, NEAR);
+            EXPECT_NEAR(angle, roll, NEAR);
+
+            // Convert extrinsic (world-based) active rotations to a sequence of
+            // intrinsic rotations (each rotation based off of previous rotation
+            // frame).
+            //
+            // R_1 * R_intrinsic = R_extrinsic * R_1
+            //    implies
+            // R_intrinsic = (R_1)^-1 R_extrinsic R_1
+            //
+            auto world_z_intrinsic = rotationVectorToQuaternion(
+                    q.inverse() * Vector3f(0.f, 0.f, rot_angle));
+
+            // Sequential intrinsic rotations compose as R_1 * R_2.
+            quaternionToAngles(q * world_z_intrinsic, &pitch, &roll, &yaw);
+
+            EXPECT_NEAR(0.f, pitch, NEAR);
+            EXPECT_NEAR(angle, roll, NEAR);
+        }
+    }
+}
+
 }  // namespace
 }  // namespace media
 }  // namespace android
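
The extrinsic-to-intrinsic conversion used in the quaternionToAngles_zaxis test above (R_intrinsic = R_1^-1 * R_extrinsic * R_1) can be checked directly with Eigen. A small standalone sketch, assuming only Eigen is available:

    #include <Eigen/Geometry>
    #include <cassert>

    int main() {
        using Eigen::Quaternionf;
        using Eigen::Vector3f;
        // R_1: 0.3 rad about X; R_e: 0.7 rad about the world Z axis (extrinsic).
        const Quaternionf r1(Eigen::AngleAxisf(0.3f, Vector3f::UnitX()));
        const Quaternionf re(Eigen::AngleAxisf(0.7f, Vector3f::UnitZ()));
        // Equivalent intrinsic rotation, so that R_1 * R_i == R_e * R_1.
        const Quaternionf ri = r1.inverse() * re * r1;
        assert((r1 * ri).isApprox(re * r1, 1e-6f));
        return 0;
    }
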
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
index 5d090de..e245c80 100644
--- a/media/libheadtracking/QuaternionUtil.cpp
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 
 #include <cassert>
 
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
deleted file mode 100644
index f7a2ca9..0000000
--- a/media/libheadtracking/QuaternionUtil.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#pragma once
-
-#include <Eigen/Geometry>
-
-namespace android {
-namespace media {
-
-/**
- * Converts a rotation vector to an equivalent quaternion.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
-
-/**
- * Converts a quaternion to an equivalent rotation vector.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
-
-/**
- * Returns a quaternion representing a rotation around the X-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateX(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateY(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateZ(float angle);
-
-}  // namespace media
-}  // namespace android
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index 31d469c..8a29027 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -32,7 +32,7 @@
 #include <sensor/SensorManager.h>
 #include <utils/Looper.h>
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 
 namespace android {
 namespace media {
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
index b6cd479..56e7b4e 100644
--- a/media/libheadtracking/StillnessDetector-test.cpp
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -16,8 +16,9 @@
 
 #include <gtest/gtest.h>
 
-#include "QuaternionUtil.h"
 #include "StillnessDetector.h"
+
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 namespace android {
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
index 7984e1e..9fbf81f 100644
--- a/media/libheadtracking/Twist-test.cpp
+++ b/media/libheadtracking/Twist-test.cpp
@@ -16,9 +16,7 @@
 
 #include "media/Twist.h"
 
-#include <gtest/gtest.h>
-
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 #include "TestUtil.h"
 
 using Eigen::Quaternionf;
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
index 664c4d5..63b9e69 100644
--- a/media/libheadtracking/Twist.cpp
+++ b/media/libheadtracking/Twist.cpp
@@ -16,7 +16,7 @@
 
 #include "media/Twist.h"
 
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
 
 namespace android {
 namespace media {
diff --git a/media/libheadtracking/VectorRecorder.cpp b/media/libheadtracking/VectorRecorder.cpp
new file mode 100644
index 0000000..5d0588e
--- /dev/null
+++ b/media/libheadtracking/VectorRecorder.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/VectorRecorder.h"
+
+namespace android::media {
+
+// Convert data to string with level indentation.
+// No need for a lock as the SimpleLog is thread-safe.
+std::string VectorRecorder::toString(size_t indent) const {
+    return mRecordLog.dumpToString(std::string(indent + 1, ' ').c_str(), mMaxLocalLogLine);
+}
+
+// Record into local log when it is time.
+void VectorRecorder::record(const std::vector<float>& record) {
+    if (record.size() != mVectorSize) return;
+
+    // Protect against concurrent calls to record().
+    std::lock_guard lg(mLock);
+
+    // if it is time, record average data and reset.
+    if (shouldRecordLog_l()) {
+        sumToAverage_l();
+        mRecordLog.log(
+                "mean: %s, min: %s, max: %s, calculated %zu samples in %0.4f second(s)",
+                toString(mSum).c_str(),
+                toString(mMin).c_str(),
+                toString(mMax).c_str(),
+                mNumberOfSamples,
+                mNumberOfSecondsSinceFirstSample.count());
+        resetRecord_l();
+    }
+
+    // update stream average.
+    if (mNumberOfSamples++ == 0) {
+        mFirstSampleTimestamp = std::chrono::steady_clock::now();
+        for (size_t i = 0; i < mVectorSize; ++i) {
+            const float value = record[i];
+            mSum[i] += value;
+            mMax[i] = value;
+            mMin[i] = value;
+        }
+    } else {
+        for (size_t i = 0; i < mVectorSize; ++i) {
+            const float value = record[i];
+            mSum[i] += value;
+            mMax[i] = std::max(mMax[i], value);
+            mMin[i] = std::min(mMin[i], value);
+        }
+    }
+}
+
+bool VectorRecorder::shouldRecordLog_l() {
+    mNumberOfSecondsSinceFirstSample = std::chrono::duration_cast<std::chrono::seconds>(
+            std::chrono::steady_clock::now() - mFirstSampleTimestamp);
+    return mNumberOfSecondsSinceFirstSample >= mRecordThreshold;
+}
+
+void VectorRecorder::resetRecord_l() {
+    mSum.assign(mVectorSize, 0);
+    mMax.assign(mVectorSize, 0);
+    mMin.assign(mVectorSize, 0);
+    mNumberOfSamples = 0;
+    mNumberOfSecondsSinceFirstSample = std::chrono::seconds(0);
+}
+
+void VectorRecorder::sumToAverage_l() {
+    if (mNumberOfSamples == 0) return;
+    const float reciprocal = 1.f / mNumberOfSamples;
+    for (auto& p : mSum) {
+        p *= reciprocal;
+    }
+}
+
+}  // namespace android::media
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 8ef8ab0..b4c78a0 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -95,7 +95,8 @@
     /**
      * This causes the current poses of the head and/or screen to be considered "center".
      */
-    virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+    virtual void recenter(
+            bool recenterHead = true, bool recenterScreen = true, std::string source = "") = 0;
 
     /**
      * Dump HeadTrackingProcessor parameters under caller lock.
diff --git a/media/libheadtracking/include/media/QuaternionUtil.h b/media/libheadtracking/include/media/QuaternionUtil.h
new file mode 100644
index 0000000..a711d17
--- /dev/null
+++ b/media/libheadtracking/include/media/QuaternionUtil.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <Eigen/Geometry>
+#include <media/Pose.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+/**
+ * Compute separate roll, pitch, and yaw angles from a quaternion
+ *
+ * The roll, pitch, and yaw follow standard 3DOF virtual reality definitions
+ * with angles increasing counter-clockwise by the right hand rule.
+ *
+ * https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ *
+ * The roll, pitch, and yaw angles are calculated separately from the device frame
+ * rotation from the world frame.  This is not to be confused with the
+ * intrinsic Euler xyz roll, pitch, yaw 'nautical' angles.
+ *
+ * The input quaternion is the active rotation that transforms the
+ * World/Stage frame to the Head/Screen frame.
+ *
+ * The input quaternion may come from two principal sensors: DEVICE and HEADSET
+ * and is interpreted as described below.
+ *
+ * DEVICE SENSOR
+ *
+ * Android sensor stack assumes device coordinates along the x/y axis.
+ *
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_rotation_vector:
+ *
+ * Looking down from the clouds. Android Device coordinate system (not used)
+ *        DEVICE --> X (Y goes through top speaker towards the observer)
+ *           | Z
+ *           V
+ *         USER
+ *
+ * Internally within this library, we transform the device sensor coordinate
+ * system by rotating the coordinate system around the X axis by -M_PI/2.
+ * This aligns the device coordinate system to match that of the
+ * Head Tracking sensor (see below), should the user be facing the device in
+ * natural (phone == portrait, tablet == ?) orientation.
+ *
+ * Looking down from the clouds. Spatializer device frame.
+ *           Y
+ *           ^
+ *           |
+ *        DEVICE --> X (Z goes through top of the DEVICE towards the observer)
+ *
+ *         USER
+ *
+ * The reference world frame is the device in vertical
+ * natural (phone == portrait) orientation with the top pointing straight
+ * up from the ground and the front-to-back direction facing north.
+ * The world frame is presumed locally fixed by magnetic and gravitational reference.
+ *
+ * HEADSET SENSOR
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_head_tracker:
+ *
+ * Looking down from the clouds. Headset frame.
+ *           Y
+ *           ^
+ *           |
+ *         USER ---> X
+ *         (Z goes through the top of the USER head towards the observer)
+ *
+ * The Z axis goes from the neck to the top of the head, the X axis goes
+ * from the left ear to the right ear, the Y axis goes from the back of the
+ * head through the nose.
+ *
+ * Typically for a headset sensor, the X and Y axes have some arbitrary fixed
+ * reference.
+ *
+ * ROLL
+ * Roll is the counter-clockwise L/R motion around the Y axis (hence ZX plane).
+ * The right hand convention means the plane is ZX not XZ.
+ * This can be considered the azimuth angle in spherical coordinates
+ * with Pitch being the elevation angle.
+ *
+ * Roll has a range of -M_PI to M_PI radians.
+ *
+ * Rolling a device changes between portrait and landscape
+ * modes, and for L/R speakers will limit the amount of crosstalk cancellation.
+ * Roll increases as the device (if vertical like a coin) rolls from left to right.
+ *
+ * By this definition, Roll is less accurate when the device is flat
+ * on a table rather than standing on edge.
+ * When perfectly flat on the table, roll may report as 0, M_PI, or -M_PI
+ * due to the ambiguity / degeneracy of atan(0, 0) in this case (the device Y axis aligns with
+ * the world Z axis), but exactly flat rarely occurs.
+ *
+ * Roll for a headset is the angle the head is inclined to the right side
+ * (like sleeping).
+ *
+ * PITCH
+ * Pitch is the deviation of the surface normal (device Y axis) along the world Z axis (away from the earth).
+ * This can be considered the elevation angle in spherical coordinates using
+ * Roll as the azimuth angle.
+ *
+ * Pitch for a device determines whether the device is "upright" or lying
+ * flat on the table (i.e. surface normal).  Pitch is 0 when upright, decreases
+ * as the device top moves away from the user to -M_PI/2 when lying down face up.
+ * Pitch increases from 0 to M_PI/2 when the device tilts towards the user, and is
+ * M_PI/2 radians when face down.
+ *
+ * Pitch for a headset is the user tilting the head/chin up or down,
+ * like nodding.
+ *
+ * Pitch has a range of -M_PI/2 to M_PI/2 radians.
+ *
+ * YAW
+ * Yaw is the rotational component along the earth's XY tangential plane,
+ * where the Z axis points radially away from the earth.
+ *
+ * Yaw has a range of -M_PI to M_PI radians.  If used for azimuth angle in
+ * spherical coordinates, the elevation angle may be derived from the Z axis.
+ *
+ * A positive increase means the phone is rotating from right to left
+ * when considered flat on the table.
+ * (headset: the user is rotating their head to look left).
+ * If the left speaker or right earbud is pointing straight up or down,
+ * this value is imprecise and Pitch or Roll is a more useful measure.
+ *
+ * Yaw for a device is like spinning a vertical device along the axis of
+ * gravity, like spinning a coin.  Yaw increases as the coin / device
+ * spins from right to left, rotating around the Z axis.
+ *
+ * Yaw for a headset is the user turning the head to look left or right
+ * like shaking the head for no. Yaw is the primary angle for a binaural
+ * head tracking device.
+ *
+ * @param q input active rotation Eigen quaternion.
+ * @param pitch output set to pitch if not nullptr
+ * @param roll output set to roll if not nullptr
+ * @param yaw output set to yaw if not nullptr
+ * @return (DEBUG==true) a debug string with intermediate transformation matrix
+ *                       interpreted as the unit basis vectors.
+ */
+
+// DEBUG returns a debug string for analysis.
+// We save unneeded rotation matrix computation by keeping the DEBUG option constexpr.
+template <bool DEBUG = false>
+auto quaternionToAngles(const Eigen::Quaternionf& q, float *pitch, float *roll, float *yaw) {
+    /*
+     * The quaternion here is the active rotation that transforms from the world frame
+     * to the device frame: the observer remains in the world frame,
+     * and the device (frame) moves.
+     *
+     * We use this to map device coordinates to world coordinates.
+     *
+     * Device:  We transform the device right speaker (X == 1), top speaker (Z == 1),
+     * and surface inwards normal (Y == 1) positions to the world frame.
+     *
+     * Headset: We transform the headset right bud (X == 1), top (Z == 1) and
+     * nose normal (Y == 1) positions to the world frame.
+     *
+     * This is the same as the world frame coordinates of the
+     *  unit device vector in the X dimension (ux),
+     *  unit device vector in the Y dimension (uy),
+     *  unit device vector in the Z dimension (uz).
+     *
+     * Rather than doing the rotation on unit vectors individually,
+     * one can simply use the columns of the rotation matrix of
+     * the world-to-body quaternion, so the computation is exceptionally fast.
+     *
+     * Furthermore, Eigen inlines the "toRotationMatrix" method
+     * and we rely on unused-expression removal for efficiency,
+     * so any elements that are not used should not be computed.
+     *
+     * Side note: For applying a rotation to several points,
+     * it is more computationally efficient to extract and
+     * use the rotation matrix form than the quaternion.
+     * So use of the rotation matrix is good for many reasons.
+     */
+    const auto rotation = q.toRotationMatrix();
+
+    /*
+     * World location of unit vector right speaker assuming the phone is situated
+     * natural (phone == portrait) mode.
+     * (headset: right bud).
+     *
+     * auto ux = q.rotation() * Eigen::Vector3f{1.f, 0.f, 0.f};
+     *         = rotation.col(0);
+     */
+    [[maybe_unused]] const auto ux_0 = rotation.coeff(0, 0);
+    [[maybe_unused]] const auto ux_1 = rotation.coeff(1, 0);
+    [[maybe_unused]] const auto ux_2 = rotation.coeff(2, 0);
+
+    [[maybe_unused]] std::string coordinates;
+    if constexpr (DEBUG) {
+        base::StringAppendF(&coordinates, "ux: %f %f %f", ux_0, ux_1, ux_2);
+    }
+
+    /*
+     * World location of screen-inwards normal assuming the phone is situated
+     * in natural (phone == portrait) mode.
+     * (headset: user nose).
+     *
+     * auto uy = q.rotation() * Eigen::Vector3f{0.f, 1.f, 0.f};
+     *         = rotation.col(1);
+     */
+    [[maybe_unused]] const auto uy_0 = rotation.coeff(0, 1);
+    [[maybe_unused]] const auto uy_1 = rotation.coeff(1, 1);
+    [[maybe_unused]] const auto uy_2 = rotation.coeff(2, 1);
+    if constexpr (DEBUG) {
+        base::StringAppendF(&coordinates, "uy: %f %f %f", uy_0, uy_1, uy_2);
+    }
+
+    /*
+     * World location of unit vector top speaker.
+     * (headset: top of head).
+     * auto uz = q.rotation() * Eigen::Vector3f{0.f, 0.f, 1.f};
+     *         = rotation.col(2);
+     */
+    [[maybe_unused]] const auto uz_0 = rotation.coeff(0, 2);
+    [[maybe_unused]] const auto uz_1 = rotation.coeff(1, 2);
+    [[maybe_unused]] const auto uz_2 = rotation.coeff(2, 2);
+    if constexpr (DEBUG) {
+        base::StringAppendF(&coordinates, "uz: %f %f %f", uz_0, uz_1, uz_2);
+    }
+
+    // pitch computed from nose world Z coordinate;
+    // hence independent of rotation around world Z.
+    if (pitch != nullptr) {
+        *pitch = asin(std::clamp(uy_2, -1.f, 1.f));
+    }
+
+    // roll computed from head/right world Z coordinate;
+    // hence independent of rotation around world Z.
+    if (roll != nullptr) {
+        // atan2 takes care of implicit scale normalization of Z, X.
+        *roll = -atan2(ux_2, uz_2);
+    }
+
+    // yaw computed from right ear angle projected onto world XY plane
+    // where world Z == 0.  This is the rotation around world Z.
+    if (yaw != nullptr) {
+        // atan2 takes care of implicit scale normalization of X, Y.
+        *yaw =  atan2(ux_1, ux_0);
+    }
+
+    if constexpr (DEBUG) {
+        return coordinates;
+    }
+}
+
+}  // namespace media
+}  // namespace android
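
A short usage sketch for the header above, assuming the libheadtracking headers and Eigen are on the include path; the expected values follow the tests in QuaternionUtil-test.cpp:

    #include <cmath>
    #include <cstdio>
    #include <media/QuaternionUtil.h>

    int main() {
        using namespace android::media;
        // A 30-degree (M_PI/6 rad) rotation about X maps to pitch; roll and yaw stay ~0.
        const Eigen::Quaternionf q = rotationVectorToQuaternion({float(M_PI / 6), 0.f, 0.f});
        float pitch, roll, yaw;
        quaternionToAngles(q, &pitch, &roll, &yaw);
        printf("pitch %.3f roll %.3f yaw %.3f\n", pitch, roll, yaw);  // ~0.524 0.000 0.000
        return 0;
    }
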
diff --git a/media/libheadtracking/include/media/VectorRecorder.h b/media/libheadtracking/include/media/VectorRecorder.h
new file mode 100644
index 0000000..1fb7521
--- /dev/null
+++ b/media/libheadtracking/include/media/VectorRecorder.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <android-base/thread_annotations.h>
+#include <audio_utils/SimpleLog.h>
+#include <chrono>
+#include <math.h>
+#include <mutex>
+#include <vector>
+
+namespace android::media {
+
+/**
+ * VectorRecorder records a vector of floats, computing the average, max, and min
+ * over a given time period.
+ *
+ * The class is thread-safe.
+ */
+class VectorRecorder {
+  public:
+    VectorRecorder(
+        size_t vectorSize, std::chrono::duration<double> threshold, int maxLogLine)
+        : mVectorSize(vectorSize)
+        , mRecordLog(maxLogLine)
+        , mRecordThreshold(threshold)
+    {
+        resetRecord_l();  // OK to call - we're in the constructor.
+    }
+
+    /** Convert recorded vector data to string with level indentation */
+    std::string toString(size_t indent) const;
+
+    /**
+     * @brief Record a vector of floats.
+     *
+     * @param record a vector of floats.
+     */
+    void record(const std::vector<float>& record);
+
+    /**
+     * Format vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
+     */
+    template <typename T>
+    static std::string toString(const std::vector<T>& record) {
+        if (record.size() == 0) {
+            return "[]";
+        }
+
+        std::string ss = "[";
+        for (size_t i = 0; i < record.size(); ++i) {
+            if (i > 0) {
+                ss.append(", ");
+            }
+            base::StringAppendF(&ss, "%0.2lf", static_cast<double>(record[i]));
+        }
+        ss.append("]");
+        return ss;
+    }
+
+  private:
+    static constexpr int mMaxLocalLogLine = 10;
+
+    const size_t mVectorSize;
+
+    // Local log for historical vector data.
+    // Locked internally, so does not need mutex below.
+    SimpleLog mRecordLog{mMaxLocalLogLine};
+
+    std::mutex mLock;
+
+    // Time threshold to record vectors in the local log.
+    // Vector data will be recorded into log at least every mRecordThreshold.
+    std::chrono::duration<double> mRecordThreshold GUARDED_BY(mLock);
+
+    // Number of seconds since first sample in mSum.
+    std::chrono::duration<double> mNumberOfSecondsSinceFirstSample GUARDED_BY(mLock);
+
+    // Timestamp of first sample recorded in mSum.
+    std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp GUARDED_BY(mLock);
+
+    // Number of samples in mSum.
+    size_t mNumberOfSamples GUARDED_BY(mLock) = 0;
+
+    std::vector<double> mSum GUARDED_BY(mLock);
+    std::vector<float> mMax GUARDED_BY(mLock);
+    std::vector<float> mMin GUARDED_BY(mLock);
+
+    // Computes mNumberOfSecondsSinceFirstSample, returns true if time to record.
+    bool shouldRecordLog_l() REQUIRES(mLock);
+
+    // Resets the running mNumberOfSamples, mSum, mMax, mMin.
+    void resetRecord_l() REQUIRES(mLock);
+
+    // Convert mSum to an average.
+    void sumToAverage_l() REQUIRES(mLock);
+};  // VectorRecorder
+
+}  // namespace android::media
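
A minimal usage sketch of the VectorRecorder interface declared above (assuming the libheadtracking and audio_utils build dependencies are available):

    #include <string>
    #include <media/VectorRecorder.h>

    void example() {
        android::media::VectorRecorder recorder(
                3 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */);
        recorder.record({1.f, 2.f, 3.f});  // accepted: size matches vectorSize
        recorder.record({1.f, 2.f});       // ignored: size mismatch, record() returns early
        // toString(indent) dumps the per-interval mean/min/max lines accumulated so far.
        const std::string dump = recorder.toString(2 /* indent */);
        (void)dump;
    }
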
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
index 5378f64..f40ab1c 100644
--- a/services/audiopolicy/engine/common/include/VolumeGroup.h
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -39,7 +39,7 @@
     VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
 
     void addSupportedAttributes(const audio_attributes_t &attr);
-    AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+    AttributesVector getSupportedAttributes() const;
 
     void addSupportedStream(audio_stream_type_t stream);
     StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
index e189807..f5ffbba 100644
--- a/services/audiopolicy/engine/common/src/VolumeGroup.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -37,6 +37,17 @@
 {
 }
 
+// Used for introspection, e.g. JAVA
+AttributesVector VolumeGroup::getSupportedAttributes() const
+{
+    AttributesVector supportedAttributes = {};
+    for (auto &aa : mGroupVolumeCurves.getAttributes()) {
+        aa.source = AUDIO_SOURCE_INVALID;
+        supportedAttributes.push_back(aa);
+    }
+    return supportedAttributes;
+}
+
 void VolumeGroup::dump(String8 *dst, int spaces) const
 {
     dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 45c5eac..134314f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -291,7 +291,9 @@
 
         if ((strategy == STRATEGY_SONIFICATION) ||
                 (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
-            devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
+            // favor dock over speaker when available
+            devices = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_SPEAKER});
         }
 
         // if SCO headset is connected and we are told to use it, play ringtone over
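
The Engine change above swaps a single-type lookup for an ordered preference list: the first type in the list with any available device wins (dock, then speaker). A generic sketch of that first-match idiom, standalone and not using the AudioPolicy types:

    #include <algorithm>
    #include <vector>

    // Returns the first entry of preferredOrder that is present in available, or -1 if none are.
    int firstAvailableFromTypes(const std::vector<int>& available,
                                const std::vector<int>& preferredOrder) {
        for (int type : preferredOrder) {
            if (std::find(available.begin(), available.end(), type) != available.end()) {
                return type;
            }
        }
        return -1;
    }
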
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e736677..365c3a4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2020,6 +2020,10 @@
         outputDesc->stop();
         return status;
     }
+    if (client->hasPreferredDevice()) {
+        // Playback activity with a preferred device that impacts routing has occurred; inform upper layers.
+        mpClientInterface->onRoutingUpdated();
+    }
     if (delayMs != 0) {
         usleep(delayMs * 1000);
     }
@@ -2265,6 +2269,11 @@
     }
     sp<TrackClientDescriptor> client = outputDesc->getClient(portId);
 
+    if (client->hasPreferredDevice(true)) {
+        // playback activity with preferred device impacts routing occurred, inform upper layers
+        mpClientInterface->onRoutingUpdated();
+    }
+
     ALOGV("stopOutput() output %d, stream %d, session %d",
           outputDesc->mIoHandle, client->stream(), client->session());
 
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 2fe7b9e..2f65f39 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
 #include <media/ShmemCompat.h>
 #include <mediautils/SchedulingPolicyService.h>
 #include <mediautils/ServiceUtilities.h>
@@ -75,6 +76,24 @@
     return maxMask;
 }
 
+static std::vector<float> recordFromTranslationRotationVector(
+        const std::vector<float>& trVector) {
+    auto headToStageOpt = Pose3f::fromVector(trVector);
+    if (!headToStageOpt) return {};
+
+    const auto stageToHead = headToStageOpt.value().inverse();
+    const auto stageToHeadTranslation = stageToHead.translation();
+    constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+    std::vector<float> record{
+        stageToHeadTranslation[0], stageToHeadTranslation[1], stageToHeadTranslation[2],
+        0.f, 0.f, 0.f};
+    media::quaternionToAngles(stageToHead.rotation(), &record[3], &record[4], &record[5]);
+    record[3] *= RAD_TO_DEGREE;
+    record[4] *= RAD_TO_DEGREE;
+    record[5] *= RAD_TO_DEGREE;
+    return record;
+}
+
 // ---------------------------------------------------------------------------
 
 class Spatializer::EngineCallbackHandler : public AHandler {
@@ -185,41 +204,6 @@
 };
 
 // ---------------------------------------------------------------------------
-
-// Convert recorded sensor data to string with level indentation.
-std::string Spatializer::HeadToStagePoseRecorder::toString(unsigned level) const {
-    std::string prefixSpace(level, ' ');
-    return mPoseRecordLog.dumpToString((prefixSpace + " ").c_str(), Spatializer::mMaxLocalLogLine);
-}
-
-// Compute sensor data, record into local log when it is time.
-void Spatializer::HeadToStagePoseRecorder::record(const std::vector<float>& headToStage) {
-    if (headToStage.size() != mPoseVectorSize) return;
-
-    if (mNumOfSampleSinceLastRecord++ == 0) {
-        mFirstSampleTimestamp = std::chrono::steady_clock::now();
-    }
-    // if it's time, do record and reset.
-    if (shouldRecordLog()) {
-        poseSumToAverage();
-        mPoseRecordLog.log(
-                "mean: %s, min: %s, max %s, calculated %d samples in %0.4f second(s)",
-                Spatializer::toString<double>(mPoseRadianSum, true /* radianToDegree */).c_str(),
-                Spatializer::toString<float>(mMinPoseAngle, true /* radianToDegree */).c_str(),
-                Spatializer::toString<float>(mMaxPoseAngle, true /* radianToDegree */).c_str(),
-                mNumOfSampleSinceLastRecord, mNumOfSecondsSinceLastRecord.count());
-        resetRecord();
-    }
-    // update stream average.
-    for (int i = 0; i < mPoseVectorSize; i++) {
-        mPoseRadianSum[i] += headToStage[i];
-        mMaxPoseAngle[i] = std::max(mMaxPoseAngle[i], headToStage[i]);
-        mMinPoseAngle[i] = std::min(mMinPoseAngle[i], headToStage[i]);
-    }
-    return;
-}
-
-// ---------------------------------------------------------------------------
 sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
     sp<Spatializer> spatializer;
 
@@ -590,7 +574,8 @@
     }
     std::lock_guard lock(mLock);
     if (mPoseController != nullptr) {
-        mLocalLog.log("%s with screenToStage %s", __func__, toString<float>(screenToStage).c_str());
+        mLocalLog.log("%s with screenToStage %s", __func__,
+                media::VectorRecorder::toString<float>(screenToStage).c_str());
         mPoseController->setScreenToStagePose(maybePose.value());
     }
     return Status::ok();
@@ -771,8 +756,9 @@
         callback = mHeadTrackingCallback;
         if (mEngine != nullptr) {
             setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
-            mPoseRecorder.record(headToStage);
-            mPoseDurableRecorder.record(headToStage);
+            const auto record = recordFromTranslationRotationVector(headToStage);
+            mPoseRecorder.record(record);
+            mPoseDurableRecorder.record(record);
         }
     }
 
@@ -1048,8 +1034,7 @@
 }
 
 std::string Spatializer::toString(unsigned level) const {
-    std::string prefixSpace;
-    prefixSpace.append(level, ' ');
+    std::string prefixSpace(level, ' ');
     std::string ss = prefixSpace + "Spatializer:\n";
     bool needUnlock = false;
 
@@ -1105,14 +1090,15 @@
 
     // PostController dump.
     if (mPoseController != nullptr) {
-        ss += mPoseController->toString(level + 1);
-        ss.append(prefixSpace +
-                  "Sensor data format - [rx, ry, rz, vx, vy, vz] (units-degree, "
-                  "r-transform, v-angular velocity, x-pitch, y-roll, z-yaw):\n");
-        ss.append(prefixSpace + " PerMinuteHistory:\n");
-        ss += mPoseDurableRecorder.toString(level + 1);
-        ss.append(prefixSpace + " PerSecondHistory:\n");
-        ss += mPoseRecorder.toString(level + 1);
+        ss.append(mPoseController->toString(level + 1))
+            .append(prefixSpace)
+            .append("Pose (active stage-to-head) [tx, ty, tz, pitch, roll, yaw]:\n")
+            .append(prefixSpace)
+            .append(" PerMinuteHistory:\n")
+            .append(mPoseDurableRecorder.toString(level + 2))
+            .append(prefixSpace)
+            .append(" PerSecondHistory:\n")
+            .append(mPoseRecorder.toString(level + 2));
     } else {
         ss.append(prefixSpace).append("SpatializerPoseController not exist\n");
     }
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 0f6bafe..b433e1a 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -27,6 +27,7 @@
 #include <audio_utils/SimpleLog.h>
 #include <math.h>
 #include <media/AudioEffect.h>
+#include <media/VectorRecorder.h>
 #include <media/audiohal/EffectHalInterface.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <system/audio_effects/effect_spatializer.h>
@@ -172,30 +173,6 @@
                 media::audio::common::toString(*result) : "unknown_latency_mode";
     }
 
-    /**
-     * Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
-     */
-    template <typename T>
-    static std::string toString(const std::vector<T>& vec, bool radianToDegree = false) {
-        if (vec.size() == 0) {
-            return "[]";
-        }
-
-        std::string ss = "[";
-        for (auto f = vec.begin(); f != vec.end(); ++f) {
-            if (f != vec.begin()) {
-                ss .append(", ");
-            }
-            if (radianToDegree) {
-                base::StringAppendF(&ss, "%0.2f", HeadToStagePoseRecorder::getDegreeWithRadian(*f));
-            } else {
-                base::StringAppendF(&ss, "%f", *f);
-            }
-        }
-        ss.append("]");
-        return ss;
-    };
-
     // If the Spatializer is not created, we send the status for metrics purposes.
     // OK:      Spatializer not expected to be created.
     // NO_INIT: Spatializer creation failed.
@@ -427,92 +404,12 @@
      * @brief Calculate and record sensor data.
      * Dump to local log with max/average pose angle every mPoseRecordThreshold.
      */
-    class HeadToStagePoseRecorder {
-      public:
-        HeadToStagePoseRecorder(std::chrono::duration<double> threshold, int maxLogLine)
-            : mPoseRecordThreshold(threshold), mPoseRecordLog(maxLogLine) {
-            resetRecord();
-        }
-
-        /** Convert recorded sensor data to string with level indentation */
-        std::string toString(unsigned level) const;
-
-        /**
-         * @brief Calculate sensor data, record into local log when it is time.
-         *
-         * @param headToStage The vector from Pose3f::toVector().
-         */
-        void record(const std::vector<float>& headToStage);
-
-        static constexpr float getDegreeWithRadian(const float radian) {
-            float radianToDegreeRatio = (180 / PI);
-            return (radian * radianToDegreeRatio);
-        }
-
-      private:
-        static constexpr float PI = M_PI;
-        /**
-         * Pose recorder time threshold to record sensor data in local log.
-         * Sensor data will be recorded into log at least every mPoseRecordThreshold.
-         */
-        std::chrono::duration<double> mPoseRecordThreshold;
-        // Number of seconds pass since last record.
-        std::chrono::duration<double> mNumOfSecondsSinceLastRecord;
-        /**
-         * According to frameworks/av/media/libheadtracking/include/media/Pose.h
-         * "The vector will have exactly 6 elements, where the first three are a translation vector
-         * and the last three are a rotation vector."
-         */
-        static constexpr size_t mPoseVectorSize = 6;
-        /**
-         * Timestamp of last sensor data record in local log.
-         */
-        std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp;
-        /**
-         * Number of sensor samples received since last record, sample rate is ~100Hz which produce
-         * ~6k samples/minute.
-         */
-        uint32_t mNumOfSampleSinceLastRecord = 0;
-        /* The sum of pose angle represented by radian since last dump, div
-         * mNumOfSampleSinceLastRecord to get arithmetic mean. Largest possible value: 2PI * 100Hz *
-         * mPoseRecordThreshold.
-         */
-        std::vector<double> mPoseRadianSum;
-        std::vector<float> mMaxPoseAngle;
-        std::vector<float> mMinPoseAngle;
-        // Local log for history sensor data.
-        SimpleLog mPoseRecordLog{mMaxLocalLogLine};
-
-        bool shouldRecordLog() {
-            mNumOfSecondsSinceLastRecord = std::chrono::duration_cast<std::chrono::seconds>(
-                    std::chrono::steady_clock::now() - mFirstSampleTimestamp);
-            return mNumOfSecondsSinceLastRecord >= mPoseRecordThreshold;
-        }
-
-        void resetRecord() {
-            mPoseRadianSum.assign(mPoseVectorSize, 0);
-            mMaxPoseAngle.assign(mPoseVectorSize, -PI);
-            mMinPoseAngle.assign(mPoseVectorSize, PI);
-            mNumOfSampleSinceLastRecord = 0;
-            mNumOfSecondsSinceLastRecord = std::chrono::seconds(0);
-        }
-
-        // Add each sample to sum and only calculate when record.
-        void poseSumToAverage() {
-            if (mNumOfSampleSinceLastRecord == 0) return;
-            for (auto& p : mPoseRadianSum) {
-                const float reciprocal = 1.f / mNumOfSampleSinceLastRecord;
-                p *= reciprocal;
-            }
-        }
-    };  // HeadToStagePoseRecorder
-
     // Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data.
-    HeadToStagePoseRecorder mPoseRecorder GUARDED_BY(mLock) =
-            HeadToStagePoseRecorder(std::chrono::seconds(1), mMaxLocalLogLine);
+    media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) {
+        6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine };
     // Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data.
-    HeadToStagePoseRecorder mPoseDurableRecorder GUARDED_BY(mLock) =
-            HeadToStagePoseRecorder(std::chrono::minutes(1), mMaxLocalLogLine);
+    media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mLock) {
+        6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine };
 };  // Spatializer
 
 }; // namespace android
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 72dba3d..2ac2af7 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 #include "SpatializerPoseController.h"
 #include <android-base/stringprintf.h>
 #include <chrono>
@@ -23,6 +24,7 @@
 //#define LOG_NDEBUG 0
 #include <sensor/Sensor.h>
 #include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
 #include <utils/Log.h>
 #include <utils/SystemClock.h>
 
@@ -192,7 +194,7 @@
         mHeadSensor = INVALID_SENSOR;
     }
 
-    mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */);
+    mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */, __func__);
 }
 
 void SpatializerPoseController::setScreenSensor(int32_t sensor) {
@@ -229,7 +231,7 @@
         mScreenSensor = INVALID_SENSOR;
     }
 
-    mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */);
+    mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */, __func__);
 }
 
 void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
@@ -276,30 +278,59 @@
 
 void SpatializerPoseController::recenter() {
     std::lock_guard lock(mMutex);
-    mProcessor->recenter();
+    mProcessor->recenter(true /* recenterHead */, true /* recenterScreen */, __func__);
 }
 
 void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
                                        const std::optional<Twist3f>& twist, bool isNewReference) {
     std::lock_guard lock(mMutex);
+    constexpr float NANOS_TO_MILLIS = 1e-6;
+    constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+
+    const float delayMs = (elapsedRealtimeNano() - timestamp) * NANOS_TO_MILLIS; // CLOCK_BOOTTIME
+
     if (sensor == mHeadSensor) {
+        std::vector<float> pryxyzdt(8);  // pitch, roll, yaw, rot_vel_x, rot_vel_y, rot_vel_z,
+                                         // discontinuity, timestamp_delay
+        media::quaternionToAngles(pose.rotation(), &pryxyzdt[0], &pryxyzdt[1], &pryxyzdt[2]);
+        if (twist) {
+            const auto rotationalVelocity = twist->rotationalVelocity();
+            for (size_t i = 0; i < 3; ++i) {
+                pryxyzdt[i + 3] = rotationalVelocity[i];
+            }
+        }
+        pryxyzdt[6] = isNewReference;
+        pryxyzdt[7] = delayMs;
+        for (size_t i = 0; i < 3; ++i) { // convert pitch, roll, yaw only; rotational velocity stays in rad/s.
+            pryxyzdt[i] *= RAD_TO_DEGREE;
+        }
+        mHeadSensorRecorder.record(pryxyzdt);
+        mHeadSensorDurableRecorder.record(pryxyzdt);
+
         mProcessor->setWorldToHeadPose(timestamp, pose,
                                        twist.value_or(Twist3f()) / kTicksPerSecond);
         if (isNewReference) {
-            mProcessor->recenter(true, false);
+            mProcessor->recenter(true, false, __func__);
         }
     }
     if (sensor == mScreenSensor) {
+        std::vector<float> pryt{ 0.f, 0.f, 0.f, delayMs}; // pitch, roll, yaw, timestamp_delay
+        media::quaternionToAngles(pose.rotation(), &pryt[0], &pryt[1], &pryt[2]);
+        for (size_t i = 0; i < 3; ++i) {
+            pryt[i] *= RAD_TO_DEGREE;
+        }
+        mScreenSensorRecorder.record(pryt);
+        mScreenSensorDurableRecorder.record(pryt);
+
         mProcessor->setWorldToScreenPose(timestamp, pose);
         if (isNewReference) {
-            mProcessor->recenter(false, true);
+            mProcessor->recenter(false, true, __func__);
         }
     }
 }
 
 std::string SpatializerPoseController::toString(unsigned level) const {
-    std::string prefixSpace;
-    prefixSpace.append(level, ' ');
+    std::string prefixSpace(level, ' ');
     std::string ss = prefixSpace + "SpatializerPoseController:\n";
     bool needUnlock = false;
 
@@ -315,14 +346,30 @@
     if (mHeadSensor == INVALID_SENSOR) {
         ss += "HeadSensor: INVALID\n";
     } else {
-        base::StringAppendF(&ss, "HeadSensor: 0x%08x\n", mHeadSensor);
+        base::StringAppendF(&ss, "HeadSensor: 0x%08x (active world-to-head) "
+            "[ pitch, roll, yaw, vx, vy, vz, disc, delay ] "
+            "(degrees, rad/s, bool, ms)\n", mHeadSensor);
+        ss.append(prefixSpace)
+            .append(" PerMinuteHistory:\n")
+            .append(mHeadSensorDurableRecorder.toString(level + 2))
+            .append(prefixSpace)
+            .append(" PerSecondHistory:\n")
+            .append(mHeadSensorRecorder.toString(level + 2));
     }
 
     ss += prefixSpace;
     if (mScreenSensor == INVALID_SENSOR) {
         ss += "ScreenSensor: INVALID\n";
     } else {
-        base::StringAppendF(&ss, "ScreenSensor: 0x%08x\n", mScreenSensor);
+        base::StringAppendF(&ss, "ScreenSensor: 0x%08x (active world-to-screen) "
+            "[ pitch, roll, yaw, delay ] "
+            "(degrees, ms)\n", mScreenSensor);
+        ss.append(prefixSpace)
+            .append(" PerMinuteHistory:\n")
+            .append(mScreenSensorDurableRecorder.toString(level + 2))
+            .append(prefixSpace)
+            .append(" PerSecondHistory:\n")
+            .append(mScreenSensorRecorder.toString(level + 2));
     }
 
     ss += prefixSpace;
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
index 233f94c..ee2c2be 100644
--- a/services/audiopolicy/service/SpatializerPoseController.h
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -24,6 +24,7 @@
 
 #include <media/HeadTrackingProcessor.h>
 #include <media/SensorPoseProvider.h>
+#include <media/VectorRecorder.h>
 
 namespace android {
 
@@ -131,6 +132,16 @@
     bool mShouldExit = false;
     bool mCalculated = false;
 
+    media::VectorRecorder mHeadSensorRecorder{
+        8 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */};
+    media::VectorRecorder mHeadSensorDurableRecorder{
+        8 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */};
+
+    media::VectorRecorder mScreenSensorRecorder{
+        4 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */};
+    media::VectorRecorder mScreenSensorDurableRecorder{
+        4 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */};
+
     // It's important that mThread is the last variable in this class
     // since we start mThread in the initializer list
     std::thread mThread;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 2a04658..a7c9bac 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2043,6 +2043,7 @@
         }
 
         client->setImageDumpMask(mImageDumpMask);
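+        // Propagate any active shell-command stream use case overrides to the newly connected client.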
+        client->setStreamUseCaseOverrides(mStreamUseCaseOverrides);
     } // lock is destroyed, allow further connect calls
 
     // Important: release the mutex here so the client can call back into the service from its
@@ -4419,6 +4420,13 @@
     String8 activeClientString = mActiveClientManager.toString();
     dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
     dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+    if (mStreamUseCaseOverrides.size() > 0) {
+        dprintf(fd, "Active stream use case overrides:");
+        for (int64_t useCaseOverride : mStreamUseCaseOverrides) {
+            dprintf(fd, " %" PRId64, useCaseOverride);
+        }
+        dprintf(fd, "\n");
+    }
 
     dumpEventLog(fd);
 
@@ -4910,6 +4918,10 @@
         return handleGetImageDumpMask(out);
     } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
         return handleSetCameraMute(args);
+    } else if (args.size() >= 2 && args[0] == String16("set-stream-use-case-override")) {
+        return handleSetStreamUseCaseOverrides(args);
+    } else if (args.size() >= 1 && args[0] == String16("clear-stream-use-case-override")) {
+        return handleClearStreamUseCaseOverrides();
     } else if (args.size() >= 2 && args[0] == String16("watch")) {
         return handleWatchCommand(args, in, out);
     } else if (args.size() >= 2 && args[0] == String16("set-watchdog")) {
@@ -5082,6 +5094,43 @@
     return OK;
 }
 
+status_t CameraService::handleSetStreamUseCaseOverrides(const Vector<String16>& args) {
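+    // Invoked from the camera service's shell command handler (assumed invocation:
+    //   adb shell cmd media.camera set-stream-use-case-override PREVIEW VIDEO_RECORD).
+    // Each argument maps to a stream use case constant; unknown names are rejected.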
+    std::vector<int64_t> useCasesOverride;
+    for (size_t i = 1; i < args.size(); i++) {
+        int64_t useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+        String8 arg8 = String8(args[i]);
+        if (arg8 == "DEFAULT") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+        } else if (arg8 == "PREVIEW") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW;
+        } else if (arg8 == "STILL_CAPTURE") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE;
+        } else if (arg8 == "VIDEO_RECORD") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD;
+        } else if (arg8 == "PREVIEW_VIDEO_STILL") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL;
+        } else if (arg8 == "VIDEO_CALL") {
+            useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_CALL;
+        } else {
+            ALOGE("%s: Invalid stream use case %s", __FUNCTION__, String8(args[i]).c_str());
+            return BAD_VALUE;
+        }
+        useCasesOverride.push_back(useCase);
+    }
+
+    Mutex::Autolock lock(mServiceLock);
+    mStreamUseCaseOverrides = std::move(useCasesOverride);
+
+    return OK;
+}
+
+status_t CameraService::handleClearStreamUseCaseOverrides() {
+    Mutex::Autolock lock(mServiceLock);
+    mStreamUseCaseOverrides.clear();
+
+    return OK;
+}
+
 status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
     if (args.size() >= 3 && args[1] == String16("start")) {
         return startWatchingTags(args, outFd);
@@ -5436,6 +5485,15 @@
         "      Valid values 0=OFF, 1=ON for JPEG\n"
         "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  set-camera-mute <0/1> enable or disable camera muting\n"
+        "  set-stream-use-case-override <usecase1> <usecase2> ... override stream use cases\n"
+        "      Use cases applied in descending resolutions. So usecase1 is assigned to the\n"
+        "      largest resolution, usecase2 is assigned to the 2nd largest resolution, and so\n"
+        "      on. In case the number of usecases is smaller than the number of streams, the\n"
+        "      last use case is assigned to all the remaining streams. In case of multiple\n"
+        "      streams with the same resolution, the tie-breaker is (JPEG, RAW, YUV, and PRIV)\n"
+        "      Valid values are (case sensitive): DEFAULT, PREVIEW, STILL_CAPTURE, VIDEO_RECORD,\n"
+        "      PREVIEW_VIDEO_STILL, VIDEO_CALL\n"
+        "  clear-stream-use-case-override clear the stream use case override\n"
         "  watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n"
         "  help print this message\n");
 }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 840e9b6..588cfc0 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -348,6 +348,13 @@
         // Set Camera service watchdog
         virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
 
+        // Set stream use case overrides
+        virtual void setStreamUseCaseOverrides(
+                const std::vector<int64_t>& useCaseOverrides) = 0;
+
+        // Clear stream use case overrides
+        virtual void clearStreamUseCaseOverrides() = 0;
+
         // The injection camera session to replace the internal camera
         // session.
         virtual status_t injectCamera(const String8& injectedCamId,
@@ -502,6 +509,7 @@
         virtual bool canCastToApiClient(apiLevel level) const;
 
         void setImageDumpMask(int /*mask*/) { }
+        void setStreamUseCaseOverrides(const std::vector<int64_t>& /*usecaseOverrides*/) { }
     protected:
         // Initialized in constructor
 
@@ -1216,6 +1224,12 @@
     // Set the camera mute state
     status_t handleSetCameraMute(const Vector<String16>& args);
 
+    // Set the stream use case overrides
+    status_t handleSetStreamUseCaseOverrides(const Vector<String16>& args);
+
+    // Clear the stream use case overrides
+    status_t handleClearStreamUseCaseOverrides();
+
     // Handle 'watch' command as passed through 'cmd'
     status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
 
@@ -1311,6 +1325,9 @@
     // Camera Service watchdog flag
     bool mCameraServiceWatchdogEnabled = true;
 
+    // Current stream use case overrides
+    std::vector<int64_t> mStreamUseCaseOverrides;
+
     /**
      * A listener class that implements the IBinder::DeathRecipient interface
      * for use to call back the error state injected by the external camera, and
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 0887ced..430c82b 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -2347,6 +2347,15 @@
     return mDevice->setCameraMute(enabled);
 }
 
+void Camera2Client::setStreamUseCaseOverrides(
+        const std::vector<int64_t>& useCaseOverrides) {
+    mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void Camera2Client::clearStreamUseCaseOverrides() {
+    mDevice->clearStreamUseCaseOverrides();
+}
+
 status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
     int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
     if (activeRequestId != 0) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 9c540a4..8071bcb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -92,6 +92,10 @@
 
     virtual status_t        setCameraServiceWatchdog(bool enabled);
 
+    virtual void            setStreamUseCaseOverrides(
+                                    const std::vector<int64_t>& useCaseOverrides);
+    virtual void            clearStreamUseCaseOverrides();
+
     /**
      * Interface used by CameraService
      */
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bc76397..dd23c2e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1752,6 +1752,15 @@
     return mDevice->setCameraMute(enabled);
 }
 
+void CameraDeviceClient::setStreamUseCaseOverrides(
+        const std::vector<int64_t>& useCaseOverrides) {
+    mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void CameraDeviceClient::clearStreamUseCaseOverrides() {
+    mDevice->clearStreamUseCaseOverrides();
+}
+
 binder::Status CameraDeviceClient::switchToOffline(
         const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
         const std::vector<int>& offlineOutputIds,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 6bb64d6..c95bb4a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -210,6 +210,9 @@
 
     virtual status_t      setCameraServiceWatchdog(bool enabled);
 
+    virtual void          setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides);
+    virtual void          clearStreamUseCaseOverrides() override;
+
     /**
      * Device listener interface
      */
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 2cb3397..52d0020 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -90,6 +90,13 @@
     return INVALID_OPERATION;
 }
 
+void CameraOfflineSessionClient::setStreamUseCaseOverrides(
+        const std::vector<int64_t>& /*useCaseOverrides*/) {
+}
+
+void CameraOfflineSessionClient::clearStreamUseCaseOverrides() {
+}
+
 
 status_t CameraOfflineSessionClient::dump(int fd, const Vector<String16>& args) {
     return BasicClient::dump(fd, args);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 8edb64a..23e1f3d 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -87,6 +87,11 @@
 
     status_t setCameraServiceWatchdog(bool enabled) override;
 
+    void setStreamUseCaseOverrides(
+            const std::vector<int64_t>& useCaseOverrides) override;
+
+    void clearStreamUseCaseOverrides() override;
+
     // permissions management
     status_t startCameraOps() override;
     status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ad24392..bf6be64 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -343,6 +343,29 @@
 }
 
 template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyPhysicalCameraChange(const std::string &physicalId) {
+    // We're only interested in this notification if overrideToPortrait is turned on.
+    if (!TClientBase::mOverrideToPortrait) {
+        return;
+    }
+
+    String8 physicalId8(physicalId.c_str());
+    auto physicalCameraMetadata = mDevice->infoPhysical(physicalId8);
+    auto orientationEntry = physicalCameraMetadata.find(ANDROID_SENSOR_ORIENTATION);
+
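+    // A landscape-mounted sensor (orientation 0 or 180) needs ROTATE_AND_CROP_90 to
+    // keep the output portrait-oriented while overrideToPortrait is active.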
+    if (orientationEntry.count == 1) {
+        int orientation = orientationEntry.data.i32[0];
+        int rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_NONE;
+
+        if (orientation == 0 || orientation == 180) {
+            rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_90;
+        }
+
+        static_cast<TClientBase *>(this)->setRotateAndCropOverride(rotateAndCropMode);
+    }
+}
+
+template <typename TClientBase>
 status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) {
     if (!mDeviceActive) {
         status_t res = TClientBase::startCameraStreamingOps();
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index d2dcdb1..705fe69 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -75,6 +75,7 @@
 
     virtual void          notifyError(int32_t errorCode,
                                       const CaptureResultExtras& resultExtras);
+    virtual void          notifyPhysicalCameraChange(const std::string &physicalId) override;
     // Returns errors on app ops permission failures
     virtual status_t      notifyActive(float maxPreviewFps);
     virtual void          notifyIdle(int64_t /*requestCount*/, int64_t /*resultErrorCount*/,
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 69514f3..8f7b16d 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -464,6 +464,15 @@
     void setImageDumpMask(int mask) { mImageDumpMask = mask; }
 
     /**
+     * Set stream use case overrides
+     */
+    void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides) {
+          mStreamUseCaseOverrides = useCaseOverrides;
+    }
+
+    void clearStreamUseCaseOverrides() { mStreamUseCaseOverrides.clear(); }
+
+    /**
      * The injection camera session to replace the internal camera
      * session.
      */
@@ -477,6 +486,7 @@
 
 protected:
     bool mImageDumpMask = 0;
+    std::vector<int64_t> mStreamUseCaseOverrides;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index f39b92a..63abcf0 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,6 +40,10 @@
     // Required for API 1 and 2
     virtual void notifyError(int32_t errorCode,
                              const CaptureResultExtras &resultExtras) = 0;
+
+    // Optional for API 1 and 2
+    virtual void notifyPhysicalCameraChange(const std::string &/*physicalId*/) {}
+
     // May return an error since it checks appops
     virtual status_t notifyActive(float maxPreviewFps) = 0;
     virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 3aab0b1..5ab7023 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -2033,7 +2033,7 @@
         }
         CameraMetadata info2;
         res = device->getCameraCharacteristics(true /*overrideForPerfClass*/, &info2,
-                /*overrideToPortrait*/true);
+                /*overrideToPortrait*/false);
         if (res == INVALID_OPERATION) {
             dprintf(fd, "  API2 not directly supported\n");
         } else if (res != OK) {
@@ -2384,8 +2384,8 @@
     if (overrideToPortrait) {
         const auto &lensFacingEntry = characteristics->find(ANDROID_LENS_FACING);
         const auto &sensorOrientationEntry = characteristics->find(ANDROID_SENSOR_ORIENTATION);
+        uint8_t lensFacing = lensFacingEntry.count > 0 ? lensFacingEntry.data.u8[0] : 0;
         if (lensFacingEntry.count > 0 && sensorOrientationEntry.count > 0) {
-            uint8_t lensFacing = lensFacingEntry.data.u8[0];
             int32_t sensorOrientation = sensorOrientationEntry.data.i32[0];
             int32_t newSensorOrientation = sensorOrientation;
 
@@ -2406,6 +2406,8 @@
         }
 
         if (characteristics->exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+            ALOGV("%s: Erasing ANDROID_INFO_DEVICE_STATE_ORIENTATIONS for lens facing %d",
+                    __FUNCTION__, lensFacing);
             characteristics->erase(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
         }
     }
@@ -2441,8 +2443,8 @@
     for (size_t i = 0; i < streamConfigs.count; i += 4) {
         if ((streamConfigs.data.i32[i] == HAL_PIXEL_FORMAT_BLOB) && (streamConfigs.data.i32[i+3] ==
                 ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
-            if (streamConfigs.data.i32[i+1] < thresholdW  ||
-                    streamConfigs.data.i32[i+2] < thresholdH) {
+            if (streamConfigs.data.i32[i+1] * streamConfigs.data.i32[i+2] <
+                    thresholdW * thresholdH) {
                 continue;
             } else {
                 largeJpegCount ++;
@@ -2462,8 +2464,8 @@
             mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
     for (size_t i = 0; i < minDurations.count; i += 4) {
         if (minDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
-            if (minDurations.data.i64[i+1] < thresholdW ||
-                    minDurations.data.i64[i+2] < thresholdH) {
+            if ((int32_t)minDurations.data.i64[i+1] * (int32_t)minDurations.data.i64[i+2] <
+                    thresholdW * thresholdH) {
                 continue;
             } else {
                 largeJpegCount++;
@@ -2483,8 +2485,8 @@
             mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
     for (size_t i = 0; i < stallDurations.count; i += 4) {
         if (stallDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
-            if (stallDurations.data.i64[i+1] < thresholdW ||
-                    stallDurations.data.i64[i+2] < thresholdH) {
+            if ((int32_t)stallDurations.data.i64[i+1] * (int32_t)stallDurations.data.i64[i+2] <
+                    thresholdW * thresholdH) {
                 continue;
             } else {
                 largeJpegCount++;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 5e7fe7f..e631f8b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -96,7 +96,8 @@
         mLastTemplateId(-1),
         mNeedFixupMonochromeTags(false),
         mOverrideForPerfClass(overrideForPerfClass),
-        mOverrideToPortrait(overrideToPortrait)
+        mOverrideToPortrait(overrideToPortrait),
+        mActivePhysicalId("")
 {
     ATRACE_CALL();
     ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
@@ -2345,6 +2346,9 @@
         tryRemoveFakeStreamLocked();
     }
 
+    // Apply any stream use case overrides set via the set-stream-use-case-override shell command
+    overrideStreamUseCaseLocked();
+
     // Start configuring the streams
     ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
 
@@ -4124,6 +4128,19 @@
     return OK;
 }
 
+void Camera3Device::setStreamUseCaseOverrides(
+        const std::vector<int64_t>& useCaseOverrides) {
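+    // Hold both the interface lock and the device lock so the overrides cannot
+    // change while a stream configuration is in progress.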
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+    mStreamUseCaseOverrides = useCaseOverrides;
+}
+
+void Camera3Device::clearStreamUseCaseOverrides() {
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+    mStreamUseCaseOverrides.clear();
+}
+
 void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
     if (mNextRequests.empty()) {
         return;
@@ -5242,4 +5259,55 @@
     return mInjectionMethods->stopInjection();
 }
 
+void Camera3Device::overrideStreamUseCaseLocked() {
+    if (mStreamUseCaseOverrides.size() == 0) {
+        return;
+    }
+
+    // Build an array of indices into mOutputStreams and sort it, first by stream
+    // size (descending), then by format in the order [JPEG, RAW, YUV, PRIV].
+    std::vector<int> outputStreamsIndices(mOutputStreams.size());
+    for (size_t i = 0; i < outputStreamsIndices.size(); i++) {
+        outputStreamsIndices[i] = i;
+    }
+
+    std::sort(outputStreamsIndices.begin(), outputStreamsIndices.end(),
+            [&](int a, int b) -> bool {
+
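+                // Tie-break order for equally sized streams: JPEG (BLOB) > RAW > YUV > PRIV,
+                // matching the order documented in the shell command help text.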
+                auto formatScore = [](int format) {
+                    switch (format) {
+                    case HAL_PIXEL_FORMAT_BLOB:
+                        return 4;
+                    case HAL_PIXEL_FORMAT_RAW16:
+                    case HAL_PIXEL_FORMAT_RAW10:
+                    case HAL_PIXEL_FORMAT_RAW12:
+                        return 3;
+                    case HAL_PIXEL_FORMAT_YCBCR_420_888:
+                        return 2;
+                    case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+                        return 1;
+                    default:
+                        return 0;
+                    }
+                };
+
+                int sizeA = mOutputStreams[a]->getWidth() * mOutputStreams[a]->getHeight();
+                int sizeB = mOutputStreams[b]->getWidth() * mOutputStreams[b]->getHeight();
+                int formatAScore = formatScore(mOutputStreams[a]->getFormat());
+                int formatBScore = formatScore(mOutputStreams[b]->getFormat());
+                return sizeA > sizeB ||
+                        (sizeA == sizeB && formatAScore > formatBScore);
+            });
+
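+    // Assign overrides to the sorted streams; any streams beyond the number of
+    // supplied overrides all receive the last override value.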
+    size_t overlapSize = std::min(mStreamUseCaseOverrides.size(), mOutputStreams.size());
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        mOutputStreams[outputStreamsIndices[i]]->setStreamUseCase(
+                mStreamUseCaseOverrides[std::min(i, overlapSize-1)]);
+    }
+}
+
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 1a50c02..746205b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -287,6 +287,13 @@
      */
     status_t setCameraServiceWatchdog(bool enabled);
 
+    // Set stream use case overrides
+    void setStreamUseCaseOverrides(
+            const std::vector<int64_t>& useCaseOverrides);
+
+    // Clear stream use case overrides
+    void clearStreamUseCaseOverrides();
+
     // Get the status tracker for the camera device
     wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
 
@@ -1375,6 +1382,9 @@
     // app compatibility reasons.
     bool mOverrideToPortrait;
 
+    // Current active physical id of the logical multi-camera, if any
+    std::string mActivePhysicalId;
+
     // The current minimum expected frame duration based on AE_TARGET_FPS_RANGE
     nsecs_t mMinExpectedDuration = 0;
     // Whether the camera device runs at fixed frame rate based on AE_MODE and
@@ -1463,6 +1473,8 @@
 
     sp<Camera3DeviceInjectionMethods> mInjectionMethods;
 
+    void overrideStreamUseCaseLocked();
+
 }; // class Camera3Device
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index a93d1da..1e9f478 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -101,6 +101,8 @@
     virtual status_t setBatchSize(size_t batchSize) override;
 
     virtual void onMinDurationChanged(nsecs_t /*duration*/, bool /*fixedFps*/) {}
+
+    virtual void setStreamUseCase(int64_t /*streamUseCase*/) {}
   protected:
 
     /**
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 396104c..ef12b64 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -1392,6 +1392,11 @@
     mFixedFps = fixedFps;
 }
 
+void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
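+    // Called from Camera3Device::overrideStreamUseCaseLocked() while reconfiguring
+    // streams, before the configuration is handed to the HAL.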
+    Mutex::Autolock l(mLock);
+    camera_stream::use_case = streamUseCase;
+}
+
 void Camera3OutputStream::returnPrefetchedBuffersLocked() {
     std::vector<Surface::BatchBuffer> batchedBuffers;
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index db988a0..a719d6b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -253,6 +253,11 @@
     virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) override;
 
     /**
+     * Modify stream use case
+     */
+    virtual void setStreamUseCase(int64_t streamUseCase) override;
+
+    /**
      * Apply ZSL related consumer usage quirk.
      */
     static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index dbc6fe1..4baa7e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -117,6 +117,11 @@
      * AE_TARGET_FPS_RANGE in the capture request.
      */
     virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) = 0;
+
+    /**
+     * Modify the stream use case for this output.
+     */
+    virtual void setStreamUseCase(int64_t streamUseCase) = 0;
 };
 
 // Helper class to organize a synchronized mapping of stream IDs to stream instances
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 6569395..792756ab 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -521,27 +521,35 @@
         if (result->partial_result != 0)
             request.resultExtras.partialResultCount = result->partial_result;
 
-        if ((result->result != nullptr) && !states.legacyClient && !states.overrideToPortrait) {
+        if (result->result != nullptr) {
             camera_metadata_ro_entry entry;
             auto ret = find_camera_metadata_ro_entry(result->result,
                     ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
             if ((ret == OK) && (entry.count > 0)) {
                 std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
-                auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
-                if (deviceInfo != states.physicalDeviceInfoMap.end()) {
-                    auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
-                    if (orientation.count > 0) {
-                        ret = CameraUtils::getRotationTransform(deviceInfo->second,
-                                OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
-                        if (ret != OK) {
-                            ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
-                                    __FUNCTION__, strerror(-ret), ret);
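+                // Notify listeners only when the active physical camera actually changes;
+                // the first capture result just seeds activePhysicalId without a callback.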
+                if (!states.activePhysicalId.empty() && physicalId != states.activePhysicalId) {
+                    states.listener->notifyPhysicalCameraChange(physicalId);
+                }
+                states.activePhysicalId = physicalId;
+
+                if (!states.legacyClient && !states.overrideToPortrait) {
+                    auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+                    if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+                        auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+                        if (orientation.count > 0) {
+                            ret = CameraUtils::getRotationTransform(deviceInfo->second,
+                                    OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
+                            if (ret != OK) {
+                                ALOGE("%s: Failed to calculate current stream transformation: %s "
+                                        "(%d)", __FUNCTION__, strerror(-ret), ret);
+                            }
+                        } else {
+                            ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
                         }
                     } else {
-                        ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+                        ALOGE("%s: Physical device not found in device info map found!",
+                                __FUNCTION__);
                     }
-                } else {
-                    ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__);
                 }
             }
         }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 019c8a8..d5328c5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -108,6 +108,7 @@
         nsecs_t& minFrameDuration;
         bool& isFixedFps;
         bool overrideToPortrait;
+        std::string &activePhysicalId;
     };
 
     void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 1e103f2..3fa7299 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -376,7 +376,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
         *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
-        mOverrideToPortrait}, mResultMetadataQueue
+        mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
     };
 
     for (const auto& result : results) {
@@ -418,7 +418,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
         *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
-        mOverrideToPortrait}, mResultMetadataQueue
+        mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
         camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 816f96b..3c3db97 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -111,6 +111,7 @@
         listener = mListener.promote();
     }
 
+    std::string activePhysicalId(""); // Unused
     AidlCaptureOutputStates states {
       {mId,
         mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -125,7 +126,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
         *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
-        /*overrideToPortrait*/false}, mResultMetadataQueue
+        /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
 
     std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -157,6 +158,7 @@
         listener = mListener.promote();
     }
 
+    std::string activePhysicalId(""); // Unused
     AidlCaptureOutputStates states {
       {mId,
         mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -171,7 +173,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
         *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
-        /*overrideToPortrait*/false}, mResultMetadataQueue
+        /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
         camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 44c60cf..382b287 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -365,8 +365,8 @@
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
-        mResultMetadataQueue
+        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+        mActivePhysicalId}, mResultMetadataQueue
     };
 
     //HidlCaptureOutputStates hidlStates {
@@ -428,8 +428,8 @@
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
-        mResultMetadataQueue
+        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+        mActivePhysicalId}, mResultMetadataQueue
     };
 
     for (const auto& result : results) {
@@ -476,8 +476,8 @@
         mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
-        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
-        mResultMetadataQueue
+        *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+        mActivePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
         camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index 705408d..28b2b47 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -92,6 +92,7 @@
         listener = mListener.promote();
     }
 
+    std::string activePhysicalId("");
     HidlCaptureOutputStates states {
       {mId,
         mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -106,7 +107,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
         mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
-        /*overrideToPortrait*/false}, mResultMetadataQueue
+        /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
 
     std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -133,6 +134,7 @@
 
     hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
 
+    std::string activePhysicalId("");
     HidlCaptureOutputStates states {
       {mId,
         mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -147,7 +149,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
         mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
-        /*overrideToPortrait*/false}, mResultMetadataQueue
+        /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
 
     std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -169,6 +171,7 @@
         listener = mListener.promote();
     }
 
+    std::string activePhysicalId("");
     HidlCaptureOutputStates states {
       {mId,
         mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -183,7 +186,7 @@
         mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
         mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
         mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
-        /*overrideToPortrait*/false}, mResultMetadataQueue
+        /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
     };
     for (const auto& msg : msgs) {
         camera3::notify(states, msg);