Merge "Add vendorExtension implementation for all unknown parameters" am: 253b5e908c am: 5d16383852
Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/2470061
Change-Id: I04f5b42a6ea6c18b77f4fd89574394ba5f7ac5f9
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 23d90cc..02047ae 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -696,7 +696,7 @@
CameraMetadata rawMetadata;
int targetSdkVersion = android_get_application_target_sdk_version();
binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr),
- targetSdkVersion, /*overrideToPortrait*/true, &rawMetadata);
+ targetSdkVersion, /*overrideToPortrait*/false, &rawMetadata);
if (!serviceRet.isOk()) {
switch(serviceRet.serviceSpecificErrorCode()) {
case hardware::ICameraService::ERROR_DISCONNECTED:
@@ -748,7 +748,7 @@
binder::Status serviceRet = cs->connectDevice(
callbacks, String16(cameraId), String16(""), {},
hardware::ICameraService::USE_CALLING_UID, /*oomScoreOffset*/0,
- targetSdkVersion, /*overrideToPortrait*/true, /*out*/&deviceRemote);
+ targetSdkVersion, /*overrideToPortrait*/false, /*out*/&deviceRemote);
if (!serviceRet.isOk()) {
ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index d4025e5..78ea2a1 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -42,9 +42,15 @@
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
- mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
- mSize(hasData ? (bpp * width * height) : 0),
- mIccSize(iccSize), mBitDepth(bitDepth) {
+ mRotationAngle(angle), mBytesPerPixel(bpp), mIccSize(iccSize),
+ mBitDepth(bitDepth) {
+ uint32_t multVal;
+ mRowBytes = __builtin_mul_overflow(bpp, width, &multVal) ? 0 : multVal;
+ mSize = __builtin_mul_overflow(multVal, height, &multVal) ? 0 : multVal;
+ if (hasData && (mRowBytes == 0 || mSize == 0)) {
+ ALOGE("Frame rowBytes/ size overflow %dx%d bpp %d", width, height, bpp);
+ android_errorWriteLog(0x534e4554, "233006499");
+ }
}
void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index f64aedf..9955862 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -16,11 +16,13 @@
"Pose.cpp",
"PoseBias.cpp",
"PoseDriftCompensator.cpp",
+ "PosePredictor.cpp",
"PoseRateLimiter.cpp",
"QuaternionUtil.cpp",
"ScreenHeadFusion.cpp",
"StillnessDetector.cpp",
"Twist.cpp",
+ "VectorRecorder.cpp",
],
shared_libs: [
"libaudioutils",
@@ -35,6 +37,15 @@
export_header_lib_headers: [
"libeigen",
],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ product_variables: {
+ debuggable: {
+ // enable experiments only in userdebug and eng builds
+ cflags: ["-DENABLE_VERIFICATION"],
+ },
+ },
}
cc_library {
@@ -76,6 +87,7 @@
"Pose-test.cpp",
"PoseBias-test.cpp",
"PoseDriftCompensator-test.cpp",
+ "PosePredictor.cpp",
"PoseRateLimiter-test.cpp",
"QuaternionUtil-test.cpp",
"ScreenHeadFusion-test.cpp",
@@ -84,6 +96,7 @@
],
shared_libs: [
"libaudioutils",
+ "libbase", // StringAppendF
"libheadtracking",
],
}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
index 299192f..5190f52 100644
--- a/media/libheadtracking/HeadTrackingProcessor-test.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -15,10 +15,10 @@
*/
#include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
@@ -82,6 +82,8 @@
std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+ processor->setPosePredictorType(PosePredictorType::TWIST);
+
// Establish a baseline for the drift compensators.
processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
processor->setWorldToScreenPose(0, Pose3f());
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 101b825..54d08d2 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -18,10 +18,11 @@
#include <android-base/stringprintf.h>
#include <audio_utils/SimpleLog.h>
#include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
#include "ModeSelector.h"
#include "PoseBias.h"
-#include "QuaternionUtil.h"
+#include "PosePredictor.h"
#include "ScreenHeadFusion.h"
#include "StillnessDetector.h"
@@ -59,8 +60,8 @@
void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
const Twist3f& headTwist) override {
- Pose3f predictedWorldToHead =
- worldToHead * integrate(headTwist, mOptions.predictionDuration);
+ const Pose3f predictedWorldToHead = mPosePredictor.predict(
+ timestamp, worldToHead, headTwist, mOptions.predictionDuration);
mHeadPoseBias.setInput(predictedWorldToHead);
mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead);
mWorldToHeadTimestamp = timestamp;
@@ -97,7 +98,7 @@
mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
// Whenever the screen is unstable, recenter the head pose.
if (!screenStable) {
- recenter(true, false);
+ recenter(true, false, "calculate: screen movement");
}
mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
worldToLogicalScreen);
@@ -109,7 +110,7 @@
// Auto-recenter.
bool headStable = mHeadStillnessDetector.calculate(timestamp);
if (headStable || !screenStable) {
- recenter(true, false);
+ recenter(true, false, "calculate: head movement");
worldToHead = mHeadPoseBias.getOutput();
}
@@ -139,16 +140,16 @@
HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
- void recenter(bool recenterHead, bool recenterScreen) override {
+ void recenter(bool recenterHead, bool recenterScreen, std::string source) override {
if (recenterHead) {
mHeadPoseBias.recenter();
mHeadStillnessDetector.reset();
- mLocalLog.log("recenter Head");
+ mLocalLog.log("recenter Head from %s", source.c_str());
}
if (recenterScreen) {
mScreenPoseBias.recenter();
mScreenStillnessDetector.reset();
- mLocalLog.log("recenter Screen");
+ mLocalLog.log("recenter Screen from %s", source.c_str());
}
// If a sensor being recentered is included in the current mode, apply rate limiting to
@@ -161,6 +162,10 @@
}
}
+ void setPosePredictorType(PosePredictorType type) override {
+ mPosePredictor.setPosePredictorType(type);
+ }
+
std::string toString_l(unsigned level) const override {
std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "HeadTrackingProcessor:\n";
@@ -186,6 +191,7 @@
prefixSpace.c_str(), mOptions.screenStillnessRotationalThreshold);
ss += mModeSelector.toString(level + 1);
ss += mRateLimiter.toString(level + 1);
+ ss += mPosePredictor.toString(level + 1);
ss.append(prefixSpace + "ReCenterHistory:\n");
ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), mMaxLocalLogLine);
return ss;
@@ -207,6 +213,7 @@
ScreenHeadFusion mScreenHeadFusion;
ModeSelector mModeSelector;
PoseRateLimiter mRateLimiter;
+ PosePredictor mPosePredictor;
static constexpr std::size_t mMaxLocalLogLine = 10;
SimpleLog mLocalLog{mMaxLocalLogLine};
};
@@ -230,5 +237,26 @@
return "EnumNotImplemented";
};
+std::string toString(PosePredictorType posePredictorType) {
+ switch (posePredictorType) {
+ case PosePredictorType::AUTO: return "AUTO";
+ case PosePredictorType::LAST: return "LAST";
+ case PosePredictorType::TWIST: return "TWIST";
+ case PosePredictorType::LEAST_SQUARES: return "LEAST_SQUARES";
+ }
+ return "UNKNOWN" + std::to_string((int)posePredictorType);
+}
+
+bool isValidPosePredictorType(PosePredictorType posePredictorType) {
+ switch (posePredictorType) {
+ case PosePredictorType::AUTO:
+ case PosePredictorType::LAST:
+ case PosePredictorType::TWIST:
+ case PosePredictorType::LEAST_SQUARES:
+ return true;
+ }
+ return false;
+}
+
} // namespace media
} // namespace android
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
index a136e6b..6925908 100644
--- a/media/libheadtracking/ModeSelector-test.cpp
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -18,7 +18,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
index 6277090..7ee21b3 100644
--- a/media/libheadtracking/ModeSelector.cpp
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -117,10 +117,12 @@
std::string ModeSelector::toString(unsigned level) const {
std::string prefixSpace(level, ' ');
std::string ss(prefixSpace);
- StringAppendF(&ss, "ModeSelector: ScreenToStage %s\n",
- mScreenToStage.toString().c_str());
- ss.append(prefixSpace + "Mode downgrade history:\n");
- ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine);
+ ss.append("ModeSelector: ScreenToStage ")
+ .append(mScreenToStage.toString())
+ .append("\n")
+ .append(prefixSpace)
+ .append("Mode change history:\n")
+ .append(mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine));
return ss;
}
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
index a9e18ce..29dba29 100644
--- a/media/libheadtracking/Pose-test.cpp
+++ b/media/libheadtracking/Pose-test.cpp
@@ -18,7 +18,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using android::media::Pose3f;
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 4a4b56a..e03725b 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -16,8 +16,8 @@
#include <android-base/stringprintf.h>
#include "media/Pose.h"
+#include "media/QuaternionUtil.h"
#include "media/Twist.h"
-#include "QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp
index 9f42a2c..659dda0 100644
--- a/media/libheadtracking/PoseBias-test.cpp
+++ b/media/libheadtracking/PoseBias-test.cpp
@@ -17,7 +17,8 @@
#include <gtest/gtest.h>
#include "PoseBias.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
index df0a05f..521e3eb 100644
--- a/media/libheadtracking/PoseDriftCompensator-test.cpp
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -18,7 +18,8 @@
#include <cmath>
#include "PoseDriftCompensator.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
index 0e90cad..2775790 100644
--- a/media/libheadtracking/PoseDriftCompensator.cpp
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -18,7 +18,7 @@
#include <cmath>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/PosePredictor.cpp b/media/libheadtracking/PosePredictor.cpp
new file mode 100644
index 0000000..f67a966
--- /dev/null
+++ b/media/libheadtracking/PosePredictor.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PosePredictor.h"
+
+namespace android::media {
+
+namespace {
+#ifdef ENABLE_VERIFICATION
+constexpr bool kEnableVerification = true;
+constexpr std::array<int, 3> kLookAheadMs{ 50, 100, 200 };
+#else
+constexpr bool kEnableVerification = false;
+constexpr std::array<int, 0> kLookAheadMs{};
+#endif
+
+} // namespace
+
+void LeastSquaresPredictor::add(int64_t atNs, const Pose3f& pose, const Twist3f& twist)
+{
+ (void)twist;
+ mLastAtNs = atNs;
+ mLastPose = pose;
+ const auto q = pose.rotation();
+ const double datNs = static_cast<double>(atNs);
+ mRw.add({datNs, q.w()});
+ mRx.add({datNs, q.x()});
+ mRy.add({datNs, q.y()});
+ mRz.add({datNs, q.z()});
+}
+
+Pose3f LeastSquaresPredictor::predict(int64_t atNs) const
+{
+ if (mRw.getN() < kMinimumSamplesForPrediction) return mLastPose;
+
+ /*
+ * Using parametric form, we have q(t) = { w(t), x(t), y(t), z(t) }.
+ * We compute the least squares prediction of w, x, y, z.
+ */
+ const double dLookahead = static_cast<double>(atNs);
+ Eigen::Quaternionf lsq(
+ mRw.getYFromX(dLookahead),
+ mRx.getYFromX(dLookahead),
+ mRy.getYFromX(dLookahead),
+ mRz.getYFromX(dLookahead));
+
+ /*
+ * We cheat here, since the result lsq is the least squares prediction
+ * in H (arbitrary quaternion), not the least squares prediction in
+ * SO(3) (unit quaternion).
+ *
+ * In other words, the result for lsq is most likely not a unit quaternion.
+ * To solve this, we normalize, thereby selecting the closest unit quaternion
+ * in SO(3) to the prediction in H.
+ */
+ lsq.normalize();
+ return Pose3f(lsq);
+}
+
+void LeastSquaresPredictor::reset() {
+ mLastAtNs = {};
+ mLastPose = {};
+ mRw.reset();
+ mRx.reset();
+ mRy.reset();
+ mRz.reset();
+}
+
+std::string LeastSquaresPredictor::toString(size_t index) const {
+ std::string s(index, ' ');
+ s.append("LeastSquaresPredictor using alpha: ")
+ .append(std::to_string(mAlpha))
+ .append(" last pose: ")
+ .append(mLastPose.toString())
+ .append("\n");
+ return s;
+}
+
+// Formatting
+static inline std::vector<size_t> createDelimiterIdx(size_t predictors, size_t lookaheads) {
+ if (predictors == 0) return {};
+ --predictors;
+ std::vector<size_t> delimiterIdx(predictors);
+ for (size_t i = 0; i < predictors; ++i) {
+ delimiterIdx[i] = (i + 1) * lookaheads;
+ }
+ return delimiterIdx;
+}
+
+PosePredictor::PosePredictor()
+ : mPredictors{ // must match switch in getCurrentPredictor()
+ std::make_shared<LastPredictor>(),
+ std::make_shared<TwistPredictor>(),
+ std::make_shared<LeastSquaresPredictor>(),
+ }
+ , mLookaheadMs(kLookAheadMs.begin(), kLookAheadMs.end())
+ , mVerifiers(std::size(mLookaheadMs) * std::size(mPredictors))
+ , mDelimiterIdx(createDelimiterIdx(std::size(mPredictors), std::size(mLookaheadMs)))
+ , mPredictionRecorder(
+ std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ mDelimiterIdx)
+ , mPredictionDurableRecorder(
+ std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ mDelimiterIdx)
+ {
+}
+
+Pose3f PosePredictor::predict(
+ int64_t timestampNs, const Pose3f& pose, const Twist3f& twist, float predictionDurationNs)
+{
+ if (timestampNs - mLastTimestampNs > kMaximumSampleIntervalBeforeResetNs) {
+ for (const auto& predictor : mPredictors) {
+ predictor->reset();
+ }
+ ++mResets;
+ }
+ mLastTimestampNs = timestampNs;
+
+ auto selectedPredictor = getCurrentPredictor();
+ if constexpr (kEnableVerification) {
+ // Update all Predictors
+ for (const auto& predictor : mPredictors) {
+ predictor->add(timestampNs, pose, twist);
+ }
+
+ // Update Verifiers and calculate errors
+ std::vector<float> error(std::size(mVerifiers));
+ for (size_t i = 0; i < mLookaheadMs.size(); ++i) {
+ constexpr float RADIAN_TO_DEGREES = 180 / M_PI;
+ const int64_t atNs =
+ timestampNs + mLookaheadMs[i] * PosePredictorVerifier::kMillisToNanos;
+
+ for (size_t j = 0; j < mPredictors.size(); ++j) {
+ const size_t idx = i * std::size(mPredictors) + j;
+ mVerifiers[idx].verifyActualPose(timestampNs, pose);
+ mVerifiers[idx].addPredictedPose(atNs, mPredictors[j]->predict(atNs));
+ error[idx] = RADIAN_TO_DEGREES * mVerifiers[idx].lastError();
+ }
+ }
+ // Record errors
+ mPredictionRecorder.record(error);
+ mPredictionDurableRecorder.record(error);
+ } else /* constexpr */ {
+ selectedPredictor->add(timestampNs, pose, twist);
+ }
+
+ // Deliver prediction
+ const int64_t predictionTimeNs = timestampNs + (int64_t)predictionDurationNs;
+ return selectedPredictor->predict(predictionTimeNs);
+}
+
+void PosePredictor::setPosePredictorType(PosePredictorType type) {
+ if (!isValidPosePredictorType(type)) return;
+ if (type == mSetType) return;
+ mSetType = type;
+ if (type == android::media::PosePredictorType::AUTO) {
+ type = android::media::PosePredictorType::LEAST_SQUARES;
+ }
+ if (type != mCurrentType) {
+ mCurrentType = type;
+ if constexpr (!kEnableVerification) {
+ // Verification keeps all predictors up-to-date.
+ // If we don't enable verification, we must reset the current predictor.
+ getCurrentPredictor()->reset();
+ }
+ }
+}
+
+std::string PosePredictor::toString(size_t index) const {
+ std::string prefixSpace(index, ' ');
+ std::string ss(prefixSpace);
+ ss.append("PosePredictor:\n")
+ .append(prefixSpace)
+ .append(" Current Prediction Type: ")
+ .append(android::media::toString(mCurrentType))
+ .append("\n")
+ .append(prefixSpace)
+ .append(" Resets: ")
+ .append(std::to_string(mResets))
+ .append("\n")
+ .append(getCurrentPredictor()->toString(index + 1));
+ if constexpr (kEnableVerification) {
+ // dump verification
+ ss.append(prefixSpace)
+ .append(" Prediction abs error (L1) degrees [ type (last twist least-squares) x ( ");
+ for (size_t i = 0; i < mLookaheadMs.size(); ++i) {
+ if (i > 0) ss.append(" : ");
+ ss.append(std::to_string(mLookaheadMs[i]));
+ }
+ std::vector<float> cumulativeAverageErrors(std::size(mVerifiers));
+ for (size_t i = 0; i < cumulativeAverageErrors.size(); ++i) {
+ cumulativeAverageErrors[i] = mVerifiers[i].cumulativeAverageError();
+ }
+ ss.append(" ) ms ]\n")
+ .append(prefixSpace)
+ .append(" Cumulative Average Error:\n")
+ .append(prefixSpace)
+ .append(" ")
+ .append(VectorRecorder::toString(cumulativeAverageErrors, mDelimiterIdx, "%.3g"))
+ .append("\n")
+ .append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mPredictionDurableRecorder.toString(index + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mPredictionRecorder.toString(index + 3));
+ }
+ return ss;
+}
+
+std::shared_ptr<PredictorBase> PosePredictor::getCurrentPredictor() const {
+ // we don't use a map here, we look up directly
+ switch (mCurrentType) {
+ default:
+ case android::media::PosePredictorType::LAST:
+ return mPredictors[0];
+ case android::media::PosePredictorType::TWIST:
+ return mPredictors[1];
+ case android::media::PosePredictorType::AUTO: // shouldn't occur here.
+ case android::media::PosePredictorType::LEAST_SQUARES:
+ return mPredictors[2];
+ }
+}
+
+} // namespace android::media
diff --git a/media/libheadtracking/PosePredictor.h b/media/libheadtracking/PosePredictor.h
new file mode 100644
index 0000000..06983cc
--- /dev/null
+++ b/media/libheadtracking/PosePredictor.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "PosePredictorVerifier.h"
+#include <memory>
+#include <audio_utils/Statistics.h>
+#include <media/PosePredictorType.h>
+#include <media/Twist.h>
+#include <media/VectorRecorder.h>
+
+namespace android::media {
+
+// Interface for generic pose predictors
+class PredictorBase {
+public:
+ virtual ~PredictorBase() = default;
+ virtual void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) = 0;
+ virtual Pose3f predict(int64_t atNs) const = 0;
+ virtual void reset() = 0;
+ virtual std::string toString(size_t index) const = 0;
+};
+
+/**
+ * LastPredictor uses the last sample Pose for prediction
+ *
+ * This class is not thread-safe.
+ */
+class LastPredictor : public PredictorBase {
+public:
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override {
+ (void)atNs;
+ (void)twist;
+ mLastPose = pose;
+ }
+
+ Pose3f predict(int64_t atNs) const override {
+ (void)atNs;
+ return mLastPose;
+ }
+
+ void reset() override {
+ mLastPose = {};
+ }
+
+ std::string toString(size_t index) const override {
+ std::string s(index, ' ');
+ s.append("LastPredictor using last pose: ")
+ .append(mLastPose.toString())
+ .append("\n");
+ return s;
+ }
+
+private:
+ Pose3f mLastPose;
+};
+
+/**
+ * TwistPredictor uses the last sample Twist and Pose for prediction
+ *
+ * This class is not thread-safe.
+ */
+class TwistPredictor : public PredictorBase {
+public:
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override {
+ mLastAtNs = atNs;
+ mLastPose = pose;
+ mLastTwist = twist;
+ }
+
+ Pose3f predict(int64_t atNs) const override {
+ return mLastPose * integrate(mLastTwist, atNs - mLastAtNs);
+ }
+
+ void reset() override {
+ mLastAtNs = {};
+ mLastPose = {};
+ mLastTwist = {};
+ }
+
+ std::string toString(size_t index) const override {
+ std::string s(index, ' ');
+ s.append("TwistPredictor using last pose: ")
+ .append(mLastPose.toString())
+ .append(" last twist: ")
+ .append(mLastTwist.toString())
+ .append("\n");
+ return s;
+ }
+
+private:
+ int64_t mLastAtNs{};
+ Pose3f mLastPose;
+ Twist3f mLastTwist;
+};
+
+
+/**
+ * LeastSquaresPredictor uses the Pose history for prediction.
+ *
+ * A exponential weighted least squares is used.
+ *
+ * This class is not thread-safe.
+ */
+class LeastSquaresPredictor : public PredictorBase {
+public:
+ // alpha is the exponential decay.
+ LeastSquaresPredictor(double alpha = kDefaultAlphaEstimator)
+ : mAlpha(alpha)
+ , mRw(alpha)
+ , mRx(alpha)
+ , mRy(alpha)
+ , mRz(alpha)
+ {}
+
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override;
+ Pose3f predict(int64_t atNs) const override;
+ void reset() override;
+ std::string toString(size_t index) const override;
+
+private:
+ const double mAlpha;
+ int64_t mLastAtNs{};
+ Pose3f mLastPose;
+ static constexpr double kDefaultAlphaEstimator = 0.5;
+ static constexpr size_t kMinimumSamplesForPrediction = 4;
+ audio_utils::LinearLeastSquaresFit<double> mRw;
+ audio_utils::LinearLeastSquaresFit<double> mRx;
+ audio_utils::LinearLeastSquaresFit<double> mRy;
+ audio_utils::LinearLeastSquaresFit<double> mRz;
+};
+
+/*
+ * PosePredictor predicts the pose given sensor input at a time in the future.
+ *
+ * This class is not thread safe.
+ */
+class PosePredictor {
+public:
+ PosePredictor();
+
+ Pose3f predict(int64_t timestampNs, const Pose3f& pose, const Twist3f& twist,
+ float predictionDurationNs);
+
+ void setPosePredictorType(PosePredictorType type);
+
+ // convert predictions to a printable string
+ std::string toString(size_t index) const;
+
+private:
+ static constexpr int64_t kMaximumSampleIntervalBeforeResetNs =
+ 300'000'000;
+
+ // Predictors
+ const std::vector<std::shared_ptr<PredictorBase>> mPredictors;
+
+ // Verifiers, create one for an array of future lookaheads for comparison.
+ const std::vector<int> mLookaheadMs;
+
+ std::vector<PosePredictorVerifier> mVerifiers;
+
+ const std::vector<size_t> mDelimiterIdx;
+
+ // Recorders
+ media::VectorRecorder mPredictionRecorder{
+ std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ mDelimiterIdx};
+ media::VectorRecorder mPredictionDurableRecorder{
+ std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ mDelimiterIdx};
+
+ // Status
+
+ // SetType is the externally set predictor type. It may include AUTO.
+ PosePredictorType mSetType = PosePredictorType::LEAST_SQUARES;
+
+ // CurrentType is the actual predictor type used by this class.
+ // It does not include AUTO because that metatype means the class
+ // chooses the best predictor type based on sensor statistics.
+ PosePredictorType mCurrentType = PosePredictorType::LEAST_SQUARES;
+
+ int64_t mResets{};
+ int64_t mLastTimestampNs{};
+
+ // Returns current predictor
+ std::shared_ptr<PredictorBase> getCurrentPredictor() const;
+};
+
+} // namespace android::media
diff --git a/media/libheadtracking/PosePredictorVerifier.h b/media/libheadtracking/PosePredictorVerifier.h
new file mode 100644
index 0000000..6b4a357
--- /dev/null
+++ b/media/libheadtracking/PosePredictorVerifier.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <deque>
+#include <string>
+
+#include <audio_utils/Statistics.h>
+#include <media/Pose.h>
+
+namespace android::media {
+
+/**
+ * PosePredictorVerifier is used to validate predictions
+ *
+ * This class is not thread-safe
+ */
+class PosePredictorVerifier {
+public:
+ std::string toString() const {
+ return mErrorStats.toString();
+ }
+
+ static constexpr int64_t kMillisToNanos = 1000000;
+
+ void verifyActualPose(int64_t timestampNs, const Pose3f& pose) {
+ for (auto it = mPredictions.begin(); it != mPredictions.end();) {
+ if (it->first < timestampNs) {
+ it = mPredictions.erase(it);
+ } else {
+ int64_t dt = it->first - timestampNs;
+ if (std::abs(dt) < 10 * kMillisToNanos) {
+ const float angle = pose.rotation().angularDistance(it->second.rotation());
+ const float error = std::abs(angle); // L1 (absolute difference) here.
+ mLastError = error;
+ mErrorStats.add(error);
+ }
+ break;
+ }
+ }
+ }
+
+ void addPredictedPose(int64_t atNs, const Pose3f& pose) {
+ mPredictions.emplace_back(atNs, pose);
+ }
+
+ float lastError() const {
+ return mLastError;
+ }
+
+ float cumulativeAverageError() const {
+ return mErrorStats.getMean();
+ }
+
+private:
+ static constexpr double kCumulativeErrorAlpha = 0.999;
+ std::deque<std::pair<int64_t, Pose3f>> mPredictions;
+ float mLastError{};
+ android::audio_utils::Statistics<double> mErrorStats{kCumulativeErrorAlpha};
+};
+
+} // namespace android::media
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
index f306183..ded874a 100644
--- a/media/libheadtracking/PoseRateLimiter-test.cpp
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -17,7 +17,8 @@
#include <gtest/gtest.h>
#include "PoseRateLimiter.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
index e79e54a..cfeca00 100644
--- a/media/libheadtracking/QuaternionUtil-test.cpp
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -16,7 +16,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using Eigen::Quaternionf;
@@ -51,6 +51,92 @@
EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
}
+// Float precision necessitates this precision (1e-4f fails)
+constexpr float NEAR = 1e-3f;
+
+TEST(QuaternionUtil, quaternionToAngles_basic) {
+ float pitch, roll, yaw;
+
+ // angles as reported.
+ // choose 11 angles between -M_PI / 2 to M_PI / 2
+ for (int step = -5; step <= 5; ++step) {
+ const float angle = M_PI * step * 0.1f;
+
+ quaternionToAngles(rotationVectorToQuaternion({angle, 0.f, 0.f}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(angle, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ EXPECT_NEAR(0.f, yaw, NEAR);
+
+ quaternionToAngles(rotationVectorToQuaternion({0.f, angle, 0.f}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+ EXPECT_NEAR(0.f, yaw, NEAR);
+
+ quaternionToAngles(rotationVectorToQuaternion({0.f, 0.f, angle}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ EXPECT_NEAR(angle, yaw, NEAR);
+ }
+
+ // Generates a debug string
+ const std::string s = quaternionToAngles<true /* DEBUG */>(
+ rotationVectorToQuaternion({M_PI, 0.f, 0.f}), &pitch, &roll, &yaw);
+ ASSERT_FALSE(s.empty());
+}
+
+TEST(QuaternionUtil, quaternionToAngles_zaxis) {
+ float pitch, roll, yaw;
+
+ for (int rot_step = -10; rot_step <= 10; ++rot_step) {
+ const float rot_angle = M_PI * rot_step * 0.1f;
+ // pitch independent of world Z rotation
+
+ // We don't test the boundaries of pitch +-M_PI/2 as roll can become
+ // degenerate and atan(0, 0) may report 0, PI, or -PI.
+ for (int step = -4; step <= 4; ++step) {
+ const float angle = M_PI * step * 0.1f;
+ auto q = rotationVectorToQuaternion({angle, 0.f, 0.f});
+ auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+ // Sequential active rotations (on world frame) compose as R_2 * R_1.
+ quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(angle, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ }
+
+ // roll independent of world Z rotation
+ for (int step = -5; step <= 5; ++step) {
+ const float angle = M_PI * step * 0.1f;
+ auto q = rotationVectorToQuaternion({0.f, angle, 0.f});
+ auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+ // Sequential active rotations (on world frame) compose as R_2 * R_1.
+ quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+
+ // Convert extrinsic (world-based) active rotations to a sequence of
+ // intrinsic rotations (each rotation based off of previous rotation
+ // frame).
+ //
+ // R_1 * R_intrinsic = R_extrinsic * R_1
+ // implies
+ // R_intrinsic = (R_1)^-1 R_extrinsic R_1
+ //
+ auto world_z_intrinsic = rotationVectorToQuaternion(
+ q.inverse() * Vector3f(0.f, 0.f, rot_angle));
+
+ // Sequential intrinsic rotations compose as R_1 * R_2.
+ quaternionToAngles(q * world_z_intrinsic, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+ }
+ }
+}
+
} // namespace
} // namespace media
} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
index 5d090de..e245c80 100644
--- a/media/libheadtracking/QuaternionUtil.cpp
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include <cassert>
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
deleted file mode 100644
index f7a2ca9..0000000
--- a/media/libheadtracking/QuaternionUtil.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#pragma once
-
-#include <Eigen/Geometry>
-
-namespace android {
-namespace media {
-
-/**
- * Converts a rotation vector to an equivalent quaternion.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
-
-/**
- * Converts a quaternion to an equivalent rotation vector.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
-
-/**
- * Returns a quaternion representing a rotation around the X-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateX(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateY(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateZ(float angle);
-
-} // namespace media
-} // namespace android
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index 31d469c..8a29027 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -32,7 +32,7 @@
#include <sensor/SensorManager.h>
#include <utils/Looper.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
index b6cd479..56e7b4e 100644
--- a/media/libheadtracking/StillnessDetector-test.cpp
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -16,8 +16,9 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
#include "StillnessDetector.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
index 7984e1e..9fbf81f 100644
--- a/media/libheadtracking/Twist-test.cpp
+++ b/media/libheadtracking/Twist-test.cpp
@@ -16,9 +16,7 @@
#include "media/Twist.h"
-#include <gtest/gtest.h>
-
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using Eigen::Quaternionf;
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
index 664c4d5..fdec694 100644
--- a/media/libheadtracking/Twist.cpp
+++ b/media/libheadtracking/Twist.cpp
@@ -15,8 +15,8 @@
*/
#include "media/Twist.h"
-
-#include "QuaternionUtil.h"
+#include <android-base/stringprintf.h>
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
@@ -39,5 +39,11 @@
return os;
}
+std::string Twist3f::toString() const {
+ return base::StringPrintf("[%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f]",
+ mTranslationalVelocity[0], mTranslationalVelocity[1], mTranslationalVelocity[2],
+ mRotationalVelocity[0], mRotationalVelocity[1], mRotationalVelocity[2]);
+}
+
} // namespace media
} // namespace android
diff --git a/media/libheadtracking/VectorRecorder.cpp b/media/libheadtracking/VectorRecorder.cpp
new file mode 100644
index 0000000..5c87d05
--- /dev/null
+++ b/media/libheadtracking/VectorRecorder.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/VectorRecorder.h"
+
+namespace android::media {
+
+// Convert data to string with level indentation.
+// No need for a lock as the SimpleLog is thread-safe.
+std::string VectorRecorder::toString(size_t indent) const {
+ return mRecordLog.dumpToString(std::string(indent, ' ').c_str(), mMaxLocalLogLine);
+}
+
+// Record into local log when it is time.
+void VectorRecorder::record(const std::vector<float>& record) {
+ if (record.size() != mVectorSize) return;
+
+ // Protect against concurrent calls to record().
+ std::lock_guard lg(mLock);
+
+ // if it is time, record average data and reset.
+ if (shouldRecordLog_l()) {
+ sumToAverage_l();
+ mRecordLog.log(
+ "mean: %s, min: %s, max %s, calculated %zu samples in %0.4f second(s)",
+ toString(mSum, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ toString(mMin, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ toString(mMax, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ mNumberOfSamples,
+ mNumberOfSecondsSinceFirstSample.count());
+ resetRecord_l();
+ }
+
+ // update stream average.
+ if (mNumberOfSamples++ == 0) {
+ mFirstSampleTimestamp = std::chrono::steady_clock::now();
+ for (size_t i = 0; i < mVectorSize; ++i) {
+ const float value = record[i];
+ mSum[i] += value;
+ mMax[i] = value;
+ mMin[i] = value;
+ }
+ } else {
+ for (size_t i = 0; i < mVectorSize; ++i) {
+ const float value = record[i];
+ mSum[i] += value;
+ mMax[i] = std::max(mMax[i], value);
+ mMin[i] = std::min(mMin[i], value);
+ }
+ }
+}
+
+bool VectorRecorder::shouldRecordLog_l() {
+ mNumberOfSecondsSinceFirstSample = std::chrono::duration_cast<std::chrono::seconds>(
+ std::chrono::steady_clock::now() - mFirstSampleTimestamp);
+ return mNumberOfSecondsSinceFirstSample >= mRecordThreshold;
+}
+
+void VectorRecorder::resetRecord_l() {
+ mSum.assign(mVectorSize, 0);
+ mMax.assign(mVectorSize, 0);
+ mMin.assign(mVectorSize, 0);
+ mNumberOfSamples = 0;
+ mNumberOfSecondsSinceFirstSample = std::chrono::seconds(0);
+}
+
+void VectorRecorder::sumToAverage_l() {
+ if (mNumberOfSamples == 0) return;
+ const float reciprocal = 1.f / mNumberOfSamples;
+ for (auto& p : mSum) {
+ p *= reciprocal;
+ }
+}
+
+} // namespace android::media
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 8ef8ab0..d2b78f2 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -19,6 +19,7 @@
#include "HeadTrackingMode.h"
#include "Pose.h"
+#include "PosePredictorType.h"
#include "Twist.h"
namespace android {
@@ -95,7 +96,13 @@
/**
* This causes the current poses for both the head and/or screen to be considered "center".
*/
- virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+ virtual void recenter(
+ bool recenterHead = true, bool recenterScreen = true, std::string source = "") = 0;
+
+ /**
+ * Set the predictor type.
+ */
+ virtual void setPosePredictorType(PosePredictorType type) = 0;
/**
* Dump HeadTrackingProcessor parameters under caller lock.
diff --git a/media/libheadtracking/include/media/PosePredictorType.h b/media/libheadtracking/include/media/PosePredictorType.h
new file mode 100644
index 0000000..aa76d5d
--- /dev/null
+++ b/media/libheadtracking/include/media/PosePredictorType.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+
+namespace android::media {
+
+enum class PosePredictorType {
+ /** Use best predictor determined from sensor input */
+ AUTO,
+
+ /** Use last pose for future prediction */
+ LAST,
+
+ /** Use twist angular velocity for future prediction */
+ TWIST,
+
+ /** Use weighted least squares history of prior poses (ignoring twist) */
+ LEAST_SQUARES,
+};
+
+std::string toString(PosePredictorType posePredictorType);
+bool isValidPosePredictorType(PosePredictorType posePredictorType);
+
+} // namespace android::media
diff --git a/media/libheadtracking/include/media/QuaternionUtil.h b/media/libheadtracking/include/media/QuaternionUtil.h
new file mode 100644
index 0000000..a711d17
--- /dev/null
+++ b/media/libheadtracking/include/media/QuaternionUtil.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <Eigen/Geometry>
+#include <media/Pose.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+/**
+ * Compute separate roll, pitch, and yaw angles from a quaternion
+ *
+ * The roll, pitch, and yaw follow standard 3DOF virtual reality definitions
+ * with angles increasing counter-clockwise by the right hand rule.
+ *
+ * https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ *
+ * The roll, pitch, and yaw angles are calculated separately from the device frame
+ * rotation from the world frame. This is not to be confused with the
+ * intrinsic Euler xyz roll, pitch, yaw 'nautical' angles.
+ *
+ * The input quaternion is the active rotation that transforms the
+ * World/Stage frame to the Head/Screen frame.
+ *
+ * The input quaternion may come from two principal sensors: DEVICE and HEADSET
+ * and are interpreted as below.
+ *
+ * DEVICE SENSOR
+ *
+ * Android sensor stack assumes device coordinates along the x/y axis.
+ *
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_rotation_vector:
+ *
+ * Looking down from the clouds. Android Device coordinate system (not used)
+ * DEVICE --> X (Y goes through top speaker towards the observer)
+ * | Z
+ * V
+ * USER
+ *
+ * Internally within this library, we transform the device sensor coordinate
+ * system by rotating the coordinate system around the X axis by -M_PI/2.
+ * This aligns the device coordinate system to match that of the
+ * Head Tracking sensor (see below), should the user be facing the device in
+ * natural (phone == portrait, tablet == ?) orientation.
+ *
+ * Looking down from the clouds. Spatializer device frame.
+ * Y
+ * ^
+ * |
+ * DEVICE --> X (Z goes through top of the DEVICE towards the observer)
+ *
+ * USER
+ *
+ * The reference world frame is the device in vertical
+ * natural (phone == portrait) orientation with the top pointing straight
+ * up from the ground and the front-to-back direction facing north.
+ * The world frame is presumed locally fixed by magnetic and gravitational reference.
+ *
+ * HEADSET SENSOR
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_head_tracker:
+ *
+ * Looking down from the clouds. Headset frame.
+ * Y
+ * ^
+ * |
+ * USER ---> X
+ * (Z goes through the top of the USER head towards the observer)
+ *
+ * The Z axis goes from the neck to the top of the head, the X axis goes
+ * from the left ear to the right ear, the Y axis goes from the back of the
+ * head through the nose.
+ *
+ * Typically for a headset sensor, the X and Y axes have some arbitrary fixed
+ * reference.
+ *
+ * ROLL
+ * Roll is the counter-clockwise L/R motion around the Y axis (hence ZX plane).
+ * The right hand convention means the plane is ZX not XZ.
+ * This can be considered the azimuth angle in spherical coordinates
+ * with Pitch being the elevation angle.
+ *
+ * Roll has a range of -M_PI to M_PI radians.
+ *
+ * Rolling a device changes between portrait and landscape
+ * modes, and for L/R speakers will limit the amount of crosstalk cancellation.
+ * Roll increases as the device (if vertical like a coin) rolls from left to right.
+ *
+ * By this definition, Roll is less accurate when the device is flat
+ * on a table rather than standing on edge.
+ * When perfectly flat on the table, roll may report as 0, M_PI, or -M_PI
+ * due to ambiguity / degeneracy of atan(0, 0) in this case (the device Y axis aligns with
+ * the world Z axis), but exactly flat rarely occurs.
+ *
+ * Roll for a headset is the angle the head is inclined to the right side
+ * (like sleeping).
+ *
+ * PITCH
+ * Pitch is the Surface normal Y deviation (along the Z axis away from the earth).
+ * This can be considered the elevation angle in spherical coordinates using
+ * Roll as the azimuth angle.
+ *
+ * Pitch for a device determines whether the device is "upright" or lying
+ * flat on the table (i.e. surface normal). Pitch is 0 when upright, decreases
+ * as the device top moves away from the user to -M_PI/2 when lying down face up.
+ * Pitch increases from 0 to M_PI/2 when the device tilts towards the user, and is
+ * M_PI/2 radians when face down.
+ *
+ * Pitch for a headset is the user tilting the head/chin up or down,
+ * like nodding.
+ *
+ * Pitch has a range of -M_PI/2, M_PI/2 radians.
+ *
+ * YAW
+ * Yaw is the rotational component along the earth's XY tangential plane,
+ * where the Z axis points radially away from the earth.
+ *
+ * Yaw has a range of -M_PI to M_PI radians. If used for azimuth angle in
+ * spherical coordinates, the elevation angle may be derived from the Z axis.
+ *
+ * A positive increase means the phone is rotating from right to left
+ * when considered flat on the table.
+ * (headset: the user is rotating their head to look left).
+ * If left speaker or right earbud is pointing straight up or down,
+ * this value is imprecise and Pitch or Roll is a more useful measure.
+ *
+ * Yaw for a device is like spinning a vertical device along the axis of
+ * gravity, like spinning a coin. Yaw increases as the coin / device
+ * spins from right to left, rotating around the Z axis.
+ *
+ * Yaw for a headset is the user turning the head to look left or right
+ * like shaking the head for no. Yaw is the primary angle for a binaural
+ * head tracking device.
+ *
+ * @param q input active rotation Eigen quaternion.
+ * @param pitch output set to pitch if not nullptr
+ * @param roll output set to roll if not nullptr
+ * @param yaw output set to yaw if not nullptr
+ * @return (DEBUG==true) a debug string with intermediate transformation matrix
+ * interpreted as the unit basis vectors.
+ */
+
+// DEBUG returns a debug string for analysis.
+// We save unneeded rotation matrix computation by keeping the DEBUG option constexpr.
+template <bool DEBUG = false>
+auto quaternionToAngles(const Eigen::Quaternionf& q, float *pitch, float *roll, float *yaw) {
+ /*
+ * The quaternion here is the active rotation that transforms from the world frame
+ * to the device frame: the observer remains in the world frame,
+ * and the device (frame) moves.
+ *
+ * We use this to map device coordinates to world coordinates.
+ *
+ * Device: We transform the device right speaker (X == 1), top speaker (Z == 1),
+ * and surface inwards normal (Y == 1) positions to the world frame.
+ *
+ * Headset: We transform the headset right bud (X == 1), top (Z == 1) and
+ * nose normal (Y == 1) positions to the world frame.
+ *
+ * This is the same as the world frame coordinates of the
+ * unit device vector in the X dimension (ux),
+ * unit device vector in the Y dimension (uy),
+ * unit device vector in the Z dimension (uz).
+ *
+ * Rather than doing the rotation on unit vectors individually,
+ * one can simply use the columns of the rotation matrix of
+ * the world-to-body quaternion, so the computation is exceptionally fast.
+ *
+ * Furthermore, Eigen inlines the "toRotationMatrix" method
+ * and we rely on unused expression removal for efficiency
+ * and any elements not used should not be computed.
+ *
+ * Side note: For applying a rotation to several points,
+ * it is more computationally efficient to extract and
+ * use the rotation matrix form than the quaternion.
+ * So use of the rotation matrix is good for many reasons.
+ */
+ const auto rotation = q.toRotationMatrix();
+
+ /*
+ * World location of unit vector right speaker assuming the phone is situated
+ * natural (phone == portrait) mode.
+ * (headset: right bud).
+ *
+ * auto ux = q.rotation() * Eigen::Vector3f{1.f, 0.f, 0.f};
+ * = rotation.col(0);
+ */
+ [[maybe_unused]] const auto ux_0 = rotation.coeff(0, 0);
+ [[maybe_unused]] const auto ux_1 = rotation.coeff(1, 0);
+ [[maybe_unused]] const auto ux_2 = rotation.coeff(2, 0);
+
+ [[maybe_unused]] std::string coordinates;
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "ux: %f %f %f", ux_0, ux_1, ux_2);
+ }
+
+ /*
+ * World location of screen-inwards normal assuming the phone is situated
+ * in natural (phone == portrait) mode.
+ * (headset: user nose).
+ *
+ * auto uy = q.rotation() * Eigen::Vector3f{0.f, 1.f, 0.f};
+ * = rotation.col(1);
+ */
+ [[maybe_unused]] const auto uy_0 = rotation.coeff(0, 1);
+ [[maybe_unused]] const auto uy_1 = rotation.coeff(1, 1);
+ [[maybe_unused]] const auto uy_2 = rotation.coeff(2, 1);
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "uy: %f %f %f", uy_0, uy_1, uy_2);
+ }
+
+ /*
+ * World location of unit vector top speaker.
+ * (headset: top of head).
+ * auto uz = q.rotation() * Eigen::Vector3f{0.f, 0.f, 1.f};
+ * = rotation.col(2);
+ */
+ [[maybe_unused]] const auto uz_0 = rotation.coeff(0, 2);
+ [[maybe_unused]] const auto uz_1 = rotation.coeff(1, 2);
+ [[maybe_unused]] const auto uz_2 = rotation.coeff(2, 2);
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "uz: %f %f %f", uz_0, uz_1, uz_2);
+ }
+
+ // pitch computed from nose world Z coordinate;
+ // hence independent of rotation around world Z.
+ if (pitch != nullptr) {
+ *pitch = asin(std::clamp(uy_2, -1.f, 1.f));
+ }
+
+ // roll computed from head/right world Z coordinate;
+ // hence independent of rotation around world Z.
+ if (roll != nullptr) {
+ // atan2 takes care of implicit scale normalization of Z, X.
+ *roll = -atan2(ux_2, uz_2);
+ }
+
+ // yaw computed from right ear angle projected onto world XY plane
+ // where world Z == 0. This is the rotation around world Z.
+ if (yaw != nullptr) {
+ // atan2 takes care of implicit scale normalization of X, Y.
+ *yaw = atan2(ux_1, ux_0);
+ }
+
+ if constexpr (DEBUG) {
+ return coordinates;
+ }
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
index 291cea3..51b83d8 100644
--- a/media/libheadtracking/include/media/Twist.h
+++ b/media/libheadtracking/include/media/Twist.h
@@ -66,6 +66,9 @@
return Twist3f(mTranslationalVelocity / s, mRotationalVelocity / s);
}
+ // Convert instance to a string representation.
+ std::string toString() const;
+
private:
Eigen::Vector3f mTranslationalVelocity;
Eigen::Vector3f mRotationalVelocity;
diff --git a/media/libheadtracking/include/media/VectorRecorder.h b/media/libheadtracking/include/media/VectorRecorder.h
new file mode 100644
index 0000000..4103a7d
--- /dev/null
+++ b/media/libheadtracking/include/media/VectorRecorder.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <android-base/thread_annotations.h>
+#include <audio_utils/SimpleLog.h>
+#include <chrono>
+#include <math.h>
+#include <mutex>
+#include <vector>
+
+namespace android::media {
+
+/**
+ * VectorRecorder records a vector of floats computing the average, max, and min
+ * over given time periods.
+ *
+ * The class is thread-safe.
+ */
+class VectorRecorder {
+ public:
+ /**
+ * @param vectorSize is the size of the vector input.
+ * If the input does not match this size, it is ignored.
+ * @param threshold is the time interval we bucket for averaging.
+ * @param maxLogLine is the number of lines we log. At this
+ * threshold, the oldest line will expire when the new line comes in.
+ * @param delimiterIdx is an optional array of delimiter indices that
+ * replace the ',' with a ':'. For example if delimiterIdx = { 3 } then
+ * a six-element vector would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27].
+ * @param formatString is the sprintf format string for the double converted data
+ * to use.
+ */
+ VectorRecorder(
+ size_t vectorSize, std::chrono::duration<double> threshold, int maxLogLine,
+ std::vector<size_t> delimiterIdx = {},
+ const std::string_view formatString = {})
+ : mVectorSize(vectorSize)
+ , mDelimiterIdx(std::move(delimiterIdx))
+ , mFormatString(formatString)
+ , mRecordLog(maxLogLine)
+ , mRecordThreshold(threshold)
+ {
+ resetRecord_l(); // OK to call - we're in the constructor.
+ }
+
+ /** Convert recorded vector data to string with level indentation */
+ std::string toString(size_t indent) const;
+
+ /**
+ * @brief Record a vector of floats.
+ *
+ * @param record a vector of floats.
+ */
+ void record(const std::vector<float>& record);
+
+ /**
+ * Format vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
+ *
+ * @param delimiterIdx is an optional array of delimiter indices that
+ * replace the ',' with a ':'. For example if delimiterIdx = { 3 } then
+ * the above example would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27].
+ * @param formatString is the sprintf format string for the double converted data
+ * to use.
+ */
+ template <typename T>
+ static std::string toString(const std::vector<T>& record,
+ const std::vector<size_t>& delimiterIdx = {},
+ const char * const formatString = nullptr) {
+ if (record.size() == 0) {
+ return "[]";
+ }
+
+ std::string ss = "[";
+ auto nextDelimiter = delimiterIdx.begin();
+ for (size_t i = 0; i < record.size(); ++i) {
+ if (i > 0) {
+ if (nextDelimiter != delimiterIdx.end()
+ && *nextDelimiter <= i) {
+ ss.append(" : ");
+ ++nextDelimiter;
+ } else {
+ ss.append(", ");
+ }
+ }
+ if (formatString != nullptr && *formatString) {
+ base::StringAppendF(&ss, formatString, static_cast<double>(record[i]));
+ } else {
+ base::StringAppendF(&ss, "%5.2lf", static_cast<double>(record[i]));
+ }
+ }
+ ss.append("]");
+ return ss;
+ }
+
+ private:
+ static constexpr int mMaxLocalLogLine = 10;
+
+ const size_t mVectorSize;
+ const std::vector<size_t> mDelimiterIdx;
+ const std::string mFormatString;
+
+ // Local log for historical vector data.
+ // Locked internally, so does not need mutex below.
+ SimpleLog mRecordLog{mMaxLocalLogLine};
+
+ std::mutex mLock;
+
+ // Time threshold to record vectors in the local log.
+ // Vector data will be recorded into log at least every mRecordThreshold.
+ std::chrono::duration<double> mRecordThreshold GUARDED_BY(mLock);
+
+ // Number of seconds since first sample in mSum.
+ std::chrono::duration<double> mNumberOfSecondsSinceFirstSample GUARDED_BY(mLock);
+
+ // Timestamp of first sample recorded in mSum.
+ std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp GUARDED_BY(mLock);
+
+ // Number of samples in mSum.
+ size_t mNumberOfSamples GUARDED_BY(mLock) = 0;
+
+ std::vector<double> mSum GUARDED_BY(mLock);
+ std::vector<float> mMax GUARDED_BY(mLock);
+ std::vector<float> mMin GUARDED_BY(mLock);
+
+ // Computes mNumberOfSecondsSinceFirstSample, returns true if time to record.
+ bool shouldRecordLog_l() REQUIRES(mLock);
+
+ // Resets the running mNumberOfSamples, mSum, mMax, mMin.
+ void resetRecord_l() REQUIRES(mLock);
+
+ // Convert mSum to an average.
+ void sumToAverage_l() REQUIRES(mLock);
+}; // VectorRecorder
+
+} // namespace android::media
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
index 5378f64..f40ab1c 100644
--- a/services/audiopolicy/engine/common/include/VolumeGroup.h
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -39,7 +39,7 @@
VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
void addSupportedAttributes(const audio_attributes_t &attr);
- AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+ AttributesVector getSupportedAttributes() const;
void addSupportedStream(audio_stream_type_t stream);
StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
index e189807..f5ffbba 100644
--- a/services/audiopolicy/engine/common/src/VolumeGroup.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -37,6 +37,17 @@
{
}
+// Used for introspection, e.g. JAVA
+AttributesVector VolumeGroup::getSupportedAttributes() const
+{
+ AttributesVector supportedAttributes = {};
+ for (auto &aa : mGroupVolumeCurves.getAttributes()) {
+ aa.source = AUDIO_SOURCE_INVALID;
+ supportedAttributes.push_back(aa);
+ }
+ return supportedAttributes;
+}
+
void VolumeGroup::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 45c5eac..649c63f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -273,10 +273,15 @@
break;
case STRATEGY_PHONE: {
- devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
- if (!devices.isEmpty()) break;
+ // TODO(b/243670205): remove this logic that gives preference to last removable devices
+ // once a UX decision has been made
devices = availableOutputDevices.getFirstDevicesFromTypes(
- getLastRemovableMediaDevices(GROUP_NONE, {AUDIO_DEVICE_OUT_BLE_HEADSET}));
+ getLastRemovableMediaDevices(GROUP_NONE, {
+ // excluding HEARING_AID and BLE_HEADSET because Dialer uses
+ // setCommunicationDevice to select them explicitly
+ AUDIO_DEVICE_OUT_HEARING_AID,
+ AUDIO_DEVICE_OUT_BLE_HEADSET
+ }));
if (!devices.isEmpty()) break;
devices = availableOutputDevices.getFirstDevicesFromTypes({
AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_EARPIECE});
@@ -291,7 +296,9 @@
if ((strategy == STRATEGY_SONIFICATION) ||
(getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
- devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
+ // favor dock over speaker when available
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_SPEAKER});
}
// if SCO headset is connected and we are told to use it, play ringtone over
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e736677..365c3a4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2020,6 +2020,10 @@
outputDesc->stop();
return status;
}
+ if (client->hasPreferredDevice()) {
+ // playback activity with preferred device impacts routing occurred, inform upper layers
+ mpClientInterface->onRoutingUpdated();
+ }
if (delayMs != 0) {
usleep(delayMs * 1000);
}
@@ -2265,6 +2269,11 @@
}
sp<TrackClientDescriptor> client = outputDesc->getClient(portId);
+ if (client->hasPreferredDevice(true)) {
+ // playback activity with preferred device impacts routing occurred, inform upper layers
+ mpClientInterface->onRoutingUpdated();
+ }
+
ALOGV("stopOutput() output %d, stream %d, session %d",
outputDesc->mIoHandle, client->stream(), client->session());
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 09b6f3b..281785e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -1816,14 +1816,12 @@
void AudioPolicyService::SensorPrivacyPolicy::registerSelf() {
SensorPrivacyManager spm;
mSensorPrivacyEnabled = spm.isSensorPrivacyEnabled();
- (void)spm.addToggleSensorPrivacyListener(this);
spm.addSensorPrivacyListener(this);
}
void AudioPolicyService::SensorPrivacyPolicy::unregisterSelf() {
SensorPrivacyManager spm;
spm.removeSensorPrivacyListener(this);
- spm.removeToggleSensorPrivacyListener(this);
}
bool AudioPolicyService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 2fe7b9e..9ee9037 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -33,6 +33,7 @@
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
#include <media/ShmemCompat.h>
#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/ServiceUtilities.h>
@@ -75,6 +76,24 @@
return maxMask;
}
+static std::vector<float> recordFromTranslationRotationVector(
+ const std::vector<float>& trVector) {
+ auto headToStageOpt = Pose3f::fromVector(trVector);
+ if (!headToStageOpt) return {};
+
+ const auto stageToHead = headToStageOpt.value().inverse();
+ const auto stageToHeadTranslation = stageToHead.translation();
+ constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+ std::vector<float> record{
+ stageToHeadTranslation[0], stageToHeadTranslation[1], stageToHeadTranslation[2],
+ 0.f, 0.f, 0.f};
+ media::quaternionToAngles(stageToHead.rotation(), &record[3], &record[4], &record[5]);
+ record[3] *= RAD_TO_DEGREE;
+ record[4] *= RAD_TO_DEGREE;
+ record[5] *= RAD_TO_DEGREE;
+ return record;
+}
+
// ---------------------------------------------------------------------------
class Spatializer::EngineCallbackHandler : public AHandler {
@@ -185,41 +204,6 @@
};
// ---------------------------------------------------------------------------
-
-// Convert recorded sensor data to string with level indentation.
-std::string Spatializer::HeadToStagePoseRecorder::toString(unsigned level) const {
- std::string prefixSpace(level, ' ');
- return mPoseRecordLog.dumpToString((prefixSpace + " ").c_str(), Spatializer::mMaxLocalLogLine);
-}
-
-// Compute sensor data, record into local log when it is time.
-void Spatializer::HeadToStagePoseRecorder::record(const std::vector<float>& headToStage) {
- if (headToStage.size() != mPoseVectorSize) return;
-
- if (mNumOfSampleSinceLastRecord++ == 0) {
- mFirstSampleTimestamp = std::chrono::steady_clock::now();
- }
- // if it's time, do record and reset.
- if (shouldRecordLog()) {
- poseSumToAverage();
- mPoseRecordLog.log(
- "mean: %s, min: %s, max %s, calculated %d samples in %0.4f second(s)",
- Spatializer::toString<double>(mPoseRadianSum, true /* radianToDegree */).c_str(),
- Spatializer::toString<float>(mMinPoseAngle, true /* radianToDegree */).c_str(),
- Spatializer::toString<float>(mMaxPoseAngle, true /* radianToDegree */).c_str(),
- mNumOfSampleSinceLastRecord, mNumOfSecondsSinceLastRecord.count());
- resetRecord();
- }
- // update stream average.
- for (int i = 0; i < mPoseVectorSize; i++) {
- mPoseRadianSum[i] += headToStage[i];
- mMaxPoseAngle[i] = std::max(mMaxPoseAngle[i], headToStage[i]);
- mMinPoseAngle[i] = std::min(mMinPoseAngle[i], headToStage[i]);
- }
- return;
-}
-
-// ---------------------------------------------------------------------------
sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
sp<Spatializer> spatializer;
@@ -590,7 +574,8 @@
}
std::lock_guard lock(mLock);
if (mPoseController != nullptr) {
- mLocalLog.log("%s with screenToStage %s", __func__, toString<float>(screenToStage).c_str());
+ mLocalLog.log("%s with screenToStage %s", __func__,
+ media::VectorRecorder::toString<float>(screenToStage).c_str());
mPoseController->setScreenToStagePose(maybePose.value());
}
return Status::ok();
@@ -771,8 +756,9 @@
callback = mHeadTrackingCallback;
if (mEngine != nullptr) {
setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
- mPoseRecorder.record(headToStage);
- mPoseDurableRecorder.record(headToStage);
+ const auto record = recordFromTranslationRotationVector(headToStage);
+ mPoseRecorder.record(record);
+ mPoseDurableRecorder.record(record);
}
}
@@ -822,8 +808,7 @@
}
}
callback = mHeadTrackingCallback;
- mLocalLog.log("%s: %s, spatializerMode %s", __func__, media::toString(mode).c_str(),
- media::toString(spatializerMode).c_str());
+ mLocalLog.log("%s: updating mode to %s", __func__, media::toString(mode).c_str());
}
if (callback != nullptr) {
callback->onHeadTrackingModeChanged(spatializerMode);
@@ -1048,8 +1033,7 @@
}
std::string Spatializer::toString(unsigned level) const {
- std::string prefixSpace;
- prefixSpace.append(level, ' ');
+ std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "Spatializer:\n";
bool needUnlock = false;
@@ -1105,14 +1089,15 @@
// PostController dump.
if (mPoseController != nullptr) {
- ss += mPoseController->toString(level + 1);
- ss.append(prefixSpace +
- "Sensor data format - [rx, ry, rz, vx, vy, vz] (units-degree, "
- "r-transform, v-angular velocity, x-pitch, y-roll, z-yaw):\n");
- ss.append(prefixSpace + " PerMinuteHistory:\n");
- ss += mPoseDurableRecorder.toString(level + 1);
- ss.append(prefixSpace + " PerSecondHistory:\n");
- ss += mPoseRecorder.toString(level + 1);
+ ss.append(mPoseController->toString(level + 1))
+ .append(prefixSpace)
+ .append("Pose (active stage-to-head) [tx, ty, tz : pitch, roll, yaw]:\n")
+ .append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mPoseDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mPoseRecorder.toString(level + 3));
} else {
ss.append(prefixSpace).append("SpatializerPoseController not exist\n");
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 0f6bafe..4d6c875 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -27,6 +27,7 @@
#include <audio_utils/SimpleLog.h>
#include <math.h>
#include <media/AudioEffect.h>
+#include <media/VectorRecorder.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/stagefright/foundation/ALooper.h>
#include <system/audio_effects/effect_spatializer.h>
@@ -172,30 +173,6 @@
media::audio::common::toString(*result) : "unknown_latency_mode";
}
- /**
- * Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
- */
- template <typename T>
- static std::string toString(const std::vector<T>& vec, bool radianToDegree = false) {
- if (vec.size() == 0) {
- return "[]";
- }
-
- std::string ss = "[";
- for (auto f = vec.begin(); f != vec.end(); ++f) {
- if (f != vec.begin()) {
- ss .append(", ");
- }
- if (radianToDegree) {
- base::StringAppendF(&ss, "%0.2f", HeadToStagePoseRecorder::getDegreeWithRadian(*f));
- } else {
- base::StringAppendF(&ss, "%f", *f);
- }
- }
- ss.append("]");
- return ss;
- };
-
// If the Spatializer is not created, we send the status for metrics purposes.
// OK: Spatializer not expected to be created.
// NO_INIT: Spatializer creation failed.
@@ -427,92 +404,12 @@
* @brief Calculate and record sensor data.
* Dump to local log with max/average pose angle every mPoseRecordThreshold.
*/
- class HeadToStagePoseRecorder {
- public:
- HeadToStagePoseRecorder(std::chrono::duration<double> threshold, int maxLogLine)
- : mPoseRecordThreshold(threshold), mPoseRecordLog(maxLogLine) {
- resetRecord();
- }
-
- /** Convert recorded sensor data to string with level indentation */
- std::string toString(unsigned level) const;
-
- /**
- * @brief Calculate sensor data, record into local log when it is time.
- *
- * @param headToStage The vector from Pose3f::toVector().
- */
- void record(const std::vector<float>& headToStage);
-
- static constexpr float getDegreeWithRadian(const float radian) {
- float radianToDegreeRatio = (180 / PI);
- return (radian * radianToDegreeRatio);
- }
-
- private:
- static constexpr float PI = M_PI;
- /**
- * Pose recorder time threshold to record sensor data in local log.
- * Sensor data will be recorded into log at least every mPoseRecordThreshold.
- */
- std::chrono::duration<double> mPoseRecordThreshold;
- // Number of seconds pass since last record.
- std::chrono::duration<double> mNumOfSecondsSinceLastRecord;
- /**
- * According to frameworks/av/media/libheadtracking/include/media/Pose.h
- * "The vector will have exactly 6 elements, where the first three are a translation vector
- * and the last three are a rotation vector."
- */
- static constexpr size_t mPoseVectorSize = 6;
- /**
- * Timestamp of last sensor data record in local log.
- */
- std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp;
- /**
- * Number of sensor samples received since last record, sample rate is ~100Hz which produce
- * ~6k samples/minute.
- */
- uint32_t mNumOfSampleSinceLastRecord = 0;
- /* The sum of pose angle represented by radian since last dump, div
- * mNumOfSampleSinceLastRecord to get arithmetic mean. Largest possible value: 2PI * 100Hz *
- * mPoseRecordThreshold.
- */
- std::vector<double> mPoseRadianSum;
- std::vector<float> mMaxPoseAngle;
- std::vector<float> mMinPoseAngle;
- // Local log for history sensor data.
- SimpleLog mPoseRecordLog{mMaxLocalLogLine};
-
- bool shouldRecordLog() {
- mNumOfSecondsSinceLastRecord = std::chrono::duration_cast<std::chrono::seconds>(
- std::chrono::steady_clock::now() - mFirstSampleTimestamp);
- return mNumOfSecondsSinceLastRecord >= mPoseRecordThreshold;
- }
-
- void resetRecord() {
- mPoseRadianSum.assign(mPoseVectorSize, 0);
- mMaxPoseAngle.assign(mPoseVectorSize, -PI);
- mMinPoseAngle.assign(mPoseVectorSize, PI);
- mNumOfSampleSinceLastRecord = 0;
- mNumOfSecondsSinceLastRecord = std::chrono::seconds(0);
- }
-
- // Add each sample to sum and only calculate when record.
- void poseSumToAverage() {
- if (mNumOfSampleSinceLastRecord == 0) return;
- for (auto& p : mPoseRadianSum) {
- const float reciprocal = 1.f / mNumOfSampleSinceLastRecord;
- p *= reciprocal;
- }
- }
- }; // HeadToStagePoseRecorder
-
// Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data.
- HeadToStagePoseRecorder mPoseRecorder GUARDED_BY(mLock) =
- HeadToStagePoseRecorder(std::chrono::seconds(1), mMaxLocalLogLine);
+ media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) {
+ 6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
// Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data.
- HeadToStagePoseRecorder mPoseDurableRecorder GUARDED_BY(mLock) =
- HeadToStagePoseRecorder(std::chrono::minutes(1), mMaxLocalLogLine);
+ media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mLock) {
+ 6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
}; // Spatializer
}; // namespace android
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 72dba3d..d325509 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include "SpatializerPoseController.h"
#include <android-base/stringprintf.h>
#include <chrono>
@@ -21,8 +22,10 @@
#define LOG_TAG "SpatializerPoseController"
//#define LOG_NDEBUG 0
+#include <cutils/properties.h>
#include <sensor/Sensor.h>
#include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
#include <utils/Log.h>
#include <utils/SystemClock.h>
@@ -45,11 +48,17 @@
// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
constexpr float kMaxRotationalVelocity = 0.8f;
-// This is how far into the future we predict the head pose, using linear extrapolation based on
-// twist (velocity). It should be set to a value that matches the characteristic durations of moving
-// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
-// high will result in high prediction errors whenever the head accelerates (changes velocity).
-constexpr auto kPredictionDuration = 50ms;
+// This is how far into the future we predict the head pose.
+// The prediction duration should be based on the actual latency from
+// head-tracker to audio output, though setting the prediction duration too
+// high may result in higher prediction errors when the head accelerates or
+// decelerates (changes velocity).
+//
+// The head tracking predictor will do a best effort to achieve the requested
+// prediction duration. If the duration is too far in the future based on
+// current sensor variance, the predictor may internally restrict duration to what
+// is achievable with reasonable confidence as the "best prediction".
+constexpr auto kPredictionDuration = 120ms;
// After not getting a pose sample for this long, we would treat the measurement as stale.
// The max connection interval is 50ms, and HT sensor event interval can differ depending on the
@@ -97,7 +106,15 @@
.maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
.maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
.freshnessTimeout = Ticks(kFreshnessTimeout).count(),
- .predictionDuration = Ticks(kPredictionDuration).count(),
+ .predictionDuration = []() -> float {
+ const int duration_ms =
+ property_get_int32("audio.spatializer.prediction_duration_ms", 0);
+ if (duration_ms > 0) {
+ return duration_ms * 1'000'000LL;
+ } else {
+ return Ticks(kPredictionDuration).count();
+ }
+ }(),
.autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(),
.autoRecenterTranslationalThreshold = kAutoRecenterTranslationThreshold,
.autoRecenterRotationalThreshold = kAutoRecenterRotationThreshold,
@@ -145,7 +162,14 @@
mShouldCalculate = false;
}
}
- }) {}
+ }) {
+ const media::PosePredictorType posePredictorType =
+ (media::PosePredictorType)
+ property_get_int32("audio.spatializer.pose_predictor_type", -1);
+ if (isValidPosePredictorType(posePredictorType)) {
+ mProcessor->setPosePredictorType(posePredictorType);
+ }
+ }
SpatializerPoseController::~SpatializerPoseController() {
{
@@ -192,7 +216,7 @@
mHeadSensor = INVALID_SENSOR;
}
- mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */);
+ mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */, __func__);
}
void SpatializerPoseController::setScreenSensor(int32_t sensor) {
@@ -229,7 +253,7 @@
mScreenSensor = INVALID_SENSOR;
}
- mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */);
+ mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */, __func__);
}
void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
@@ -276,30 +300,66 @@
void SpatializerPoseController::recenter() {
std::lock_guard lock(mMutex);
- mProcessor->recenter();
+ mProcessor->recenter(true /* recenterHead */, true /* recenterScreen */, __func__);
}
void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
const std::optional<Twist3f>& twist, bool isNewReference) {
std::lock_guard lock(mMutex);
+ constexpr float NANOS_TO_MILLIS = 1e-6;
+ constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+
+ const float delayMs = (elapsedRealtimeNano() - timestamp) * NANOS_TO_MILLIS; // CLOCK_BOOTTIME
+
if (sensor == mHeadSensor) {
+ std::vector<float> pryprydt(8); // pitch, roll, yaw, d_pitch, d_roll, d_yaw,
+ // discontinuity, timestamp_delay
+ media::quaternionToAngles(pose.rotation(), &pryprydt[0], &pryprydt[1], &pryprydt[2]);
+ if (twist) {
+ const auto rotationalVelocity = twist->rotationalVelocity();
+ // The rotational velocity is an intrinsic transform (i.e. based on the head
+ // coordinate system, not the world coordinate system). It is a 3 element vector:
+ // axis (d theta / dt).
+ //
+ // We leave rotational velocity relative to the head coordinate system,
+ // as the initial head tracking sensor's world frame is arbitrary.
+ media::quaternionToAngles(media::rotationVectorToQuaternion(rotationalVelocity),
+ &pryprydt[3], &pryprydt[4], &pryprydt[5]);
+ }
+ pryprydt[6] = isNewReference;
+ pryprydt[7] = delayMs;
+ for (size_t i = 0; i < 6; ++i) {
+ // pitch, roll, yaw in degrees, referenced in degrees on the world frame.
+ // d_pitch, d_roll, d_yaw rotational velocity in degrees/s, based on the world frame.
+ pryprydt[i] *= RAD_TO_DEGREE;
+ }
+ mHeadSensorRecorder.record(pryprydt);
+ mHeadSensorDurableRecorder.record(pryprydt);
+
mProcessor->setWorldToHeadPose(timestamp, pose,
twist.value_or(Twist3f()) / kTicksPerSecond);
if (isNewReference) {
- mProcessor->recenter(true, false);
+ mProcessor->recenter(true, false, __func__);
}
}
if (sensor == mScreenSensor) {
+ std::vector<float> pryt{ 0.f, 0.f, 0.f, delayMs}; // pitch, roll, yaw, timestamp_delay
+ media::quaternionToAngles(pose.rotation(), &pryt[0], &pryt[1], &pryt[2]);
+ for (size_t i = 0; i < 3; ++i) {
+ pryt[i] *= RAD_TO_DEGREE;
+ }
+ mScreenSensorRecorder.record(pryt);
+ mScreenSensorDurableRecorder.record(pryt);
+
mProcessor->setWorldToScreenPose(timestamp, pose);
if (isNewReference) {
- mProcessor->recenter(false, true);
+ mProcessor->recenter(false, true, __func__);
}
}
}
std::string SpatializerPoseController::toString(unsigned level) const {
- std::string prefixSpace;
- prefixSpace.append(level, ' ');
+ std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "SpatializerPoseController:\n";
bool needUnlock = false;
@@ -315,14 +375,31 @@
if (mHeadSensor == INVALID_SENSOR) {
ss += "HeadSensor: INVALID\n";
} else {
- base::StringAppendF(&ss, "HeadSensor: 0x%08x\n", mHeadSensor);
+ base::StringAppendF(&ss, "HeadSensor: 0x%08x "
+ "(active world-to-head : head-relative velocity) "
+ "[ pitch, roll, yaw : d_pitch, d_roll, d_yaw : disc : delay ] "
+ "(degrees, degrees/s, bool, ms)\n", mHeadSensor);
+ ss.append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mHeadSensorDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mHeadSensorRecorder.toString(level + 3));
}
ss += prefixSpace;
if (mScreenSensor == INVALID_SENSOR) {
ss += "ScreenSensor: INVALID\n";
} else {
- base::StringAppendF(&ss, "ScreenSensor: 0x%08x\n", mScreenSensor);
+ base::StringAppendF(&ss, "ScreenSensor: 0x%08x (active world-to-screen) "
+ "[ pitch, roll, yaw : delay ] "
+ "(degrees, ms)\n", mScreenSensor);
+ ss.append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mScreenSensorDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mScreenSensorRecorder.toString(level + 3));
}
ss += prefixSpace;
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
index 233f94c..9d78188 100644
--- a/services/audiopolicy/service/SpatializerPoseController.h
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -24,6 +24,7 @@
#include <media/HeadTrackingProcessor.h>
#include <media/SensorPoseProvider.h>
+#include <media/VectorRecorder.h>
namespace android {
@@ -131,6 +132,20 @@
bool mShouldExit = false;
bool mCalculated = false;
+ media::VectorRecorder mHeadSensorRecorder{
+ 8 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ { 3, 6, 7 } /* delimiterIdx */};
+ media::VectorRecorder mHeadSensorDurableRecorder{
+ 8 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ { 3, 6, 7 } /* delimiterIdx */};
+
+ media::VectorRecorder mScreenSensorRecorder{
+ 4 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ { 3 } /* delimiterIdx */};
+ media::VectorRecorder mScreenSensorDurableRecorder{
+ 4 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ { 3 } /* delimiterIdx */};
+
// It's important that mThread is the last variable in this class
// since we starts mThread in initializer list
std::thread mThread;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 2a04658..a7c9bac 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2043,6 +2043,7 @@
}
client->setImageDumpMask(mImageDumpMask);
+ client->setStreamUseCaseOverrides(mStreamUseCaseOverrides);
} // lock is destroyed, allow further connect calls
// Important: release the mutex here so the client can call back into the service from its
@@ -4419,6 +4420,13 @@
String8 activeClientString = mActiveClientManager.toString();
dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+ if (mStreamUseCaseOverrides.size() > 0) {
+ dprintf(fd, "Active stream use case overrides:");
+ for (int64_t useCaseOverride : mStreamUseCaseOverrides) {
+ dprintf(fd, " %" PRId64, useCaseOverride);
+ }
+ dprintf(fd, "\n");
+ }
dumpEventLog(fd);
@@ -4910,6 +4918,10 @@
return handleGetImageDumpMask(out);
} else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
return handleSetCameraMute(args);
+ } else if (args.size() >= 2 && args[0] == String16("set-stream-use-case-override")) {
+ return handleSetStreamUseCaseOverrides(args);
+ } else if (args.size() >= 1 && args[0] == String16("clear-stream-use-case-override")) {
+ return handleClearStreamUseCaseOverrides();
} else if (args.size() >= 2 && args[0] == String16("watch")) {
return handleWatchCommand(args, in, out);
} else if (args.size() >= 2 && args[0] == String16("set-watchdog")) {
@@ -5082,6 +5094,43 @@
return OK;
}
+status_t CameraService::handleSetStreamUseCaseOverrides(const Vector<String16>& args) {
+ std::vector<int64_t> useCasesOverride;
+ for (size_t i = 1; i < args.size(); i++) {
+ int64_t useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ String8 arg8 = String8(args[i]);
+ if (arg8 == "DEFAULT") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ } else if (arg8 == "PREVIEW") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW;
+ } else if (arg8 == "STILL_CAPTURE") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE;
+ } else if (arg8 == "VIDEO_RECORD") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD;
+ } else if (arg8 == "PREVIEW_VIDEO_STILL") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL;
+ } else if (arg8 == "VIDEO_CALL") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_CALL;
+ } else {
+ ALOGE("%s: Invalid stream use case %s", __FUNCTION__, String8(args[i]).c_str());
+ return BAD_VALUE;
+ }
+ useCasesOverride.push_back(useCase);
+ }
+
+ Mutex::Autolock lock(mServiceLock);
+ mStreamUseCaseOverrides = std::move(useCasesOverride);
+
+ return OK;
+}
+
+status_t CameraService::handleClearStreamUseCaseOverrides() {
+ Mutex::Autolock lock(mServiceLock);
+ mStreamUseCaseOverrides.clear();
+
+ return OK;
+}
+
status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
if (args.size() >= 3 && args[1] == String16("start")) {
return startWatchingTags(args, outFd);
@@ -5436,6 +5485,15 @@
" Valid values 0=OFF, 1=ON for JPEG\n"
" get-image-dump-mask returns the current image-dump-mask value\n"
" set-camera-mute <0/1> enable or disable camera muting\n"
+ " set-stream-use-case-override <usecase1> <usecase2> ... override stream use cases\n"
+ " Use cases applied in descending resolutions. So usecase1 is assigned to the\n"
+ " largest resolution, usecase2 is assigned to the 2nd largest resolution, and so\n"
+ " on. In case the number of usecases is smaller than the number of streams, the\n"
+ " last use case is assigned to all the remaining streams. In case of multiple\n"
+ " streams with the same resolution, the tie-breaker is (JPEG, RAW, YUV, and PRIV)\n"
+ " Valid values are (case sensitive): DEFAULT, PREVIEW, STILL_CAPTURE, VIDEO_RECORD,\n"
+ " PREVIEW_VIDEO_STILL, VIDEO_CALL\n"
+ " clear-stream-use-case-override clear the stream use case override\n"
" watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n"
" help print this message\n");
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 840e9b6..588cfc0 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -348,6 +348,13 @@
// Set Camera service watchdog
virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+ // Set stream use case overrides
+ virtual void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) = 0;
+
+ // Clear stream use case overrides
+ virtual void clearStreamUseCaseOverrides() = 0;
+
// The injection camera session to replace the internal camera
// session.
virtual status_t injectCamera(const String8& injectedCamId,
@@ -502,6 +509,7 @@
virtual bool canCastToApiClient(apiLevel level) const;
void setImageDumpMask(int /*mask*/) { }
+ void setStreamUseCaseOverrides(const std::vector<int64_t>& /*usecaseOverrides*/) { }
protected:
// Initialized in constructor
@@ -1216,6 +1224,12 @@
// Set the camera mute state
status_t handleSetCameraMute(const Vector<String16>& args);
+ // Set the stream use case overrides
+ status_t handleSetStreamUseCaseOverrides(const Vector<String16>& args);
+
+ // Clear the stream use case overrides
+ status_t handleClearStreamUseCaseOverrides();
+
// Handle 'watch' command as passed through 'cmd'
status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
@@ -1311,6 +1325,9 @@
// Camera Service watchdog flag
bool mCameraServiceWatchdogEnabled = true;
+ // Current stream use case overrides
+ std::vector<int64_t> mStreamUseCaseOverrides;
+
/**
* A listener class that implements the IBinder::DeathRecipient interface
* for use to call back the error state injected by the external camera, and
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 0887ced..430c82b 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -2347,6 +2347,15 @@
return mDevice->setCameraMute(enabled);
}
+void Camera2Client::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) {
+ mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void Camera2Client::clearStreamUseCaseOverrides() {
+ mDevice->clearStreamUseCaseOverrides();
+}
+
status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
if (activeRequestId != 0) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 9c540a4..8071bcb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -92,6 +92,10 @@
virtual status_t setCameraServiceWatchdog(bool enabled);
+ virtual void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides);
+ virtual void clearStreamUseCaseOverrides();
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bc76397..dd23c2e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1752,6 +1752,15 @@
return mDevice->setCameraMute(enabled);
}
+void CameraDeviceClient::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) {
+ mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void CameraDeviceClient::clearStreamUseCaseOverrides() {
+ mDevice->clearStreamUseCaseOverrides();
+}
+
binder::Status CameraDeviceClient::switchToOffline(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
const std::vector<int>& offlineOutputIds,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 6bb64d6..c95bb4a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -210,6 +210,9 @@
virtual status_t setCameraServiceWatchdog(bool enabled);
+ virtual void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides);
+ virtual void clearStreamUseCaseOverrides() override;
+
/**
* Device listener interface
*/
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 2cb3397..52d0020 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -90,6 +90,13 @@
return INVALID_OPERATION;
}
+void CameraOfflineSessionClient::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& /*useCaseOverrides*/) {
+}
+
+void CameraOfflineSessionClient::clearStreamUseCaseOverrides() {
+}
+
status_t CameraOfflineSessionClient::dump(int fd, const Vector<String16>& args) {
return BasicClient::dump(fd, args);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 8edb64a..23e1f3d 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -87,6 +87,11 @@
status_t setCameraServiceWatchdog(bool enabled) override;
+ void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) override;
+
+ void clearStreamUseCaseOverrides() override;
+
// permissions management
status_t startCameraOps() override;
status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ad24392..bf6be64 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -343,6 +343,29 @@
}
template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyPhysicalCameraChange(const std::string &physicalId) {
+ // We're only interested in this notification if overrideToPortrait is turned on.
+ if (!TClientBase::mOverrideToPortrait) {
+ return;
+ }
+
+ String8 physicalId8(physicalId.c_str());
+ auto physicalCameraMetadata = mDevice->infoPhysical(physicalId8);
+ auto orientationEntry = physicalCameraMetadata.find(ANDROID_SENSOR_ORIENTATION);
+
+ if (orientationEntry.count == 1) {
+ int orientation = orientationEntry.data.i32[0];
+ int rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_NONE;
+
+ if (orientation == 0 || orientation == 180) {
+ rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_90;
+ }
+
+ static_cast<TClientBase *>(this)->setRotateAndCropOverride(rotateAndCropMode);
+ }
+}
+
+template <typename TClientBase>
status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) {
if (!mDeviceActive) {
status_t res = TClientBase::startCameraStreamingOps();
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index d2dcdb1..705fe69 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -75,6 +75,7 @@
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
+ virtual void notifyPhysicalCameraChange(const std::string &physicalId) override;
// Returns errors on app ops permission failures
virtual status_t notifyActive(float maxPreviewFps);
virtual void notifyIdle(int64_t /*requestCount*/, int64_t /*resultErrorCount*/,
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 69514f3..8f7b16d 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -464,6 +464,15 @@
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
/**
+ * Set stream use case overrides
+ */
+ void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides) {
+ mStreamUseCaseOverrides = useCaseOverrides;
+ }
+
+ void clearStreamUseCaseOverrides() {}
+
+ /**
* The injection camera session to replace the internal camera
* session.
*/
@@ -477,6 +486,7 @@
protected:
bool mImageDumpMask = 0;
+ std::vector<int64_t> mStreamUseCaseOverrides;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index f39b92a..63abcf0 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,6 +40,10 @@
// Required for API 1 and 2
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras &resultExtras) = 0;
+
+ // Optional for API 1 and 2
+ virtual void notifyPhysicalCameraChange(const std::string &/*physicalId*/) {}
+
// May return an error since it checks appops
virtual status_t notifyActive(float maxPreviewFps) = 0;
virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 3aab0b1..5ab7023 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -2033,7 +2033,7 @@
}
CameraMetadata info2;
res = device->getCameraCharacteristics(true /*overrideForPerfClass*/, &info2,
- /*overrideToPortrait*/true);
+ /*overrideToPortrait*/false);
if (res == INVALID_OPERATION) {
dprintf(fd, " API2 not directly supported\n");
} else if (res != OK) {
@@ -2384,8 +2384,8 @@
if (overrideToPortrait) {
const auto &lensFacingEntry = characteristics->find(ANDROID_LENS_FACING);
const auto &sensorOrientationEntry = characteristics->find(ANDROID_SENSOR_ORIENTATION);
+        uint8_t lensFacing = (lensFacingEntry.count > 0) ? lensFacingEntry.data.u8[0] : ANDROID_LENS_FACING_BACK;
if (lensFacingEntry.count > 0 && sensorOrientationEntry.count > 0) {
- uint8_t lensFacing = lensFacingEntry.data.u8[0];
int32_t sensorOrientation = sensorOrientationEntry.data.i32[0];
int32_t newSensorOrientation = sensorOrientation;
@@ -2406,6 +2406,8 @@
}
if (characteristics->exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+ ALOGV("%s: Erasing ANDROID_INFO_DEVICE_STATE_ORIENTATIONS for lens facing %d",
+ __FUNCTION__, lensFacing);
characteristics->erase(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
}
}
@@ -2441,8 +2443,8 @@
for (size_t i = 0; i < streamConfigs.count; i += 4) {
if ((streamConfigs.data.i32[i] == HAL_PIXEL_FORMAT_BLOB) && (streamConfigs.data.i32[i+3] ==
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
- if (streamConfigs.data.i32[i+1] < thresholdW ||
- streamConfigs.data.i32[i+2] < thresholdH) {
+ if (streamConfigs.data.i32[i+1] * streamConfigs.data.i32[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount ++;
@@ -2462,8 +2464,8 @@
mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
for (size_t i = 0; i < minDurations.count; i += 4) {
if (minDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
- if (minDurations.data.i64[i+1] < thresholdW ||
- minDurations.data.i64[i+2] < thresholdH) {
+ if ((int32_t)minDurations.data.i64[i+1] * (int32_t)minDurations.data.i64[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount++;
@@ -2483,8 +2485,8 @@
mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
for (size_t i = 0; i < stallDurations.count; i += 4) {
if (stallDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
- if (stallDurations.data.i64[i+1] < thresholdW ||
- stallDurations.data.i64[i+2] < thresholdH) {
+ if ((int32_t)stallDurations.data.i64[i+1] * (int32_t)stallDurations.data.i64[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount++;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 5e7fe7f..e631f8b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -96,7 +96,8 @@
mLastTemplateId(-1),
mNeedFixupMonochromeTags(false),
mOverrideForPerfClass(overrideForPerfClass),
- mOverrideToPortrait(overrideToPortrait)
+ mOverrideToPortrait(overrideToPortrait),
+ mActivePhysicalId("")
{
ATRACE_CALL();
ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
@@ -2345,6 +2346,9 @@
tryRemoveFakeStreamLocked();
}
+ // Override stream use case based on "adb shell command"
+ overrideStreamUseCaseLocked();
+
// Start configuring the streams
ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
@@ -4124,6 +4128,19 @@
return OK;
}
+void Camera3Device::setStreamUseCaseOverrides(
+        const std::vector<int64_t>& useCaseOverrides) {
+    Mutex::Autolock il(mInterfaceLock);  // lock order: interface lock before device lock
+    Mutex::Autolock l(mLock);
+    mStreamUseCaseOverrides = useCaseOverrides;  // applied by overrideStreamUseCaseLocked() at the next stream configuration
+}
+
+void Camera3Device::clearStreamUseCaseOverrides() {
+    Mutex::Autolock il(mInterfaceLock);  // same lock order as setStreamUseCaseOverrides
+    Mutex::Autolock l(mLock);
+    mStreamUseCaseOverrides.clear();  // already-configured streams keep their current use case
+}
+
void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
if (mNextRequests.empty()) {
return;
@@ -5242,4 +5259,55 @@
return mInjectionMethods->stopInjection();
}
+void Camera3Device::overrideStreamUseCaseLocked() {
+    if (mStreamUseCaseOverrides.size() == 0) {
+        return;
+    }
+
+    // Start from an array of indexes in mStreamUseCaseOverrides, and sort them
+    // based first on size, and second on formats of [JPEG, RAW, YUV, PRIV].
+    std::vector<int> outputStreamsIndices(mOutputStreams.size());
+    for (size_t i = 0; i < outputStreamsIndices.size(); i++) {
+        outputStreamsIndices[i] = i;
+    }
+
+    std::sort(outputStreamsIndices.begin(), outputStreamsIndices.end(),
+            [&](int a, int b) -> bool {
+
+                auto formatScore = [](int format) {
+                    switch (format) {
+                        case HAL_PIXEL_FORMAT_BLOB:
+                            return 4;
+                        case HAL_PIXEL_FORMAT_RAW16:
+                        case HAL_PIXEL_FORMAT_RAW10:
+                        case HAL_PIXEL_FORMAT_RAW12:
+                            return 3;
+                        case HAL_PIXEL_FORMAT_YCBCR_420_888:
+                            return 2;
+                        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+                            return 1;
+                        default:
+                            return 0;
+                    }
+                };
+
+                int sizeA = mOutputStreams[a]->getWidth() * mOutputStreams[a]->getHeight();
+                int sizeB = mOutputStreams[b]->getWidth() * mOutputStreams[b]->getHeight();  // fixed: was computed from stream 'a'
+                int formatAScore = formatScore(mOutputStreams[a]->getFormat());
+                int formatBScore = formatScore(mOutputStreams[b]->getFormat());
+                if (sizeA > sizeB ||
+                        (sizeA == sizeB && formatAScore > formatBScore)) {  // '>' not '>=': keep a strict weak ordering as std::sort requires
+                    return true;
+                } else {
+                    return false;
+                }
+            });
+
+    size_t overlapSize = std::min(mStreamUseCaseOverrides.size(), mOutputStreams.size());
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        mOutputStreams[outputStreamsIndices[i]]->setStreamUseCase(
+                mStreamUseCaseOverrides[std::min(i, overlapSize-1)]);  // extra streams reuse the last override
+    }
+}
+
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 1a50c02..746205b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -287,6 +287,13 @@
*/
status_t setCameraServiceWatchdog(bool enabled);
+ // Set stream use case overrides
+ void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides);
+
+ // Clear stream use case overrides
+ void clearStreamUseCaseOverrides();
+
// Get the status trackeer for the camera device
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
@@ -1375,6 +1382,9 @@
// app compatibility reasons.
bool mOverrideToPortrait;
+ // Current active physical id of the logical multi-camera, if any
+ std::string mActivePhysicalId;
+
// The current minimum expected frame duration based on AE_TARGET_FPS_RANGE
nsecs_t mMinExpectedDuration = 0;
// Whether the camera device runs at fixed frame rate based on AE_MODE and
@@ -1463,6 +1473,8 @@
sp<Camera3DeviceInjectionMethods> mInjectionMethods;
+ void overrideStreamUseCaseLocked();
+
}; // class Camera3Device
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index a93d1da..1e9f478 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -101,6 +101,8 @@
virtual status_t setBatchSize(size_t batchSize) override;
virtual void onMinDurationChanged(nsecs_t /*duration*/, bool /*fixedFps*/) {}
+
+ virtual void setStreamUseCase(int64_t /*streamUseCase*/) {}
protected:
/**
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 396104c..ef12b64 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -1392,6 +1392,11 @@
mFixedFps = fixedFps;
}
+void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
+    Mutex::Autolock l(mLock);  // serialize with other stream-state mutations
+    camera_stream::use_case = streamUseCase;  // overwrite the HAL stream's use case field
+}
+
void Camera3OutputStream::returnPrefetchedBuffersLocked() {
std::vector<Surface::BatchBuffer> batchedBuffers;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index db988a0..a719d6b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -253,6 +253,11 @@
virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) override;
/**
+ * Modify stream use case
+ */
+ virtual void setStreamUseCase(int64_t streamUseCase) override;
+
+ /**
* Apply ZSL related consumer usage quirk.
*/
static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index dbc6fe1..4baa7e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -117,6 +117,11 @@
* AE_TARGET_FPS_RANGE in the capture request.
*/
virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) = 0;
+
+ /**
+ * Modify the stream use case for this output.
+ */
+ virtual void setStreamUseCase(int64_t streamUseCase) = 0;
};
// Helper class to organize a synchronized mapping of stream IDs to stream instances
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 6569395..792756ab 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -521,27 +521,35 @@
if (result->partial_result != 0)
request.resultExtras.partialResultCount = result->partial_result;
- if ((result->result != nullptr) && !states.legacyClient && !states.overrideToPortrait) {
+ if (result->result != nullptr) {
camera_metadata_ro_entry entry;
auto ret = find_camera_metadata_ro_entry(result->result,
ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
if ((ret == OK) && (entry.count > 0)) {
std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
- auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
- if (deviceInfo != states.physicalDeviceInfoMap.end()) {
- auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
- if (orientation.count > 0) {
- ret = CameraUtils::getRotationTransform(deviceInfo->second,
- OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
- if (ret != OK) {
- ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
- __FUNCTION__, strerror(-ret), ret);
+ if (!states.activePhysicalId.empty() && physicalId != states.activePhysicalId) {
+ states.listener->notifyPhysicalCameraChange(physicalId);
+ }
+ states.activePhysicalId = physicalId;
+
+ if (!states.legacyClient && !states.overrideToPortrait) {
+ auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+ if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+ auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+ if (orientation.count > 0) {
+ ret = CameraUtils::getRotationTransform(deviceInfo->second,
+ OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
+ if (ret != OK) {
+ ALOGE("%s: Failed to calculate current stream transformation: %s "
+ "(%d)", __FUNCTION__, strerror(-ret), ret);
+ }
+ } else {
+ ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
}
} else {
- ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+                    ALOGE("%s: Physical device not found in device info map!",
+ __FUNCTION__);
}
- } else {
- ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__);
}
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 019c8a8..d5328c5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -108,6 +108,7 @@
nsecs_t& minFrameDuration;
bool& isFixedFps;
bool overrideToPortrait;
+ std::string &activePhysicalId;
};
void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 1e103f2..3fa7299 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -376,7 +376,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
- mOverrideToPortrait}, mResultMetadataQueue
+ mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& result : results) {
@@ -418,7 +418,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
- mOverrideToPortrait}, mResultMetadataQueue
+ mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 816f96b..3c3db97 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -111,6 +111,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId(""); // Unused
AidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -125,7 +126,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -157,6 +158,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId(""); // Unused
AidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -171,7 +173,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 44c60cf..382b287 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -365,8 +365,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
//HidlCaptureOutputStates hidlStates {
@@ -428,8 +428,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& result : results) {
@@ -476,8 +476,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index 705408d..28b2b47 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -92,6 +92,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -106,7 +107,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -133,6 +134,7 @@
hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -147,7 +149,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -169,6 +171,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -183,7 +186,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);