Remove drift compensator

Drift compensation is better done by the actual sensor, since the sensor
knows best how (and whether) to compensate. Some sensors, for example,
don't suffer from drift at all.

This change replaces the drift compensator with a much simpler module
that only takes care of biasing (for recentering purposes).
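
For illustration, a minimal usage sketch of the new module (mirroring
the PoseBias header added below; worldToHead1/worldToHead2 are just
placeholder poses):

    PoseBias bias;
    bias.setInput(worldToHead1);
    Pose3f out = bias.getOutput();  // == worldToHead1
    bias.recenter();                // reference becomes worldToHead1
    out = bias.getOutput();         // == identity
    bias.setInput(worldToHead2);
    out = bias.getOutput();         // == worldToHead1.inverse() * worldToHead2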

Test: atest --host libheadtracking-test
Test: Manual verification of spatial audio with head-tracking.
Change-Id: I50ad8f53a6d7016bb8f75772702b35634adbc413
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index b0563e2..1d41889 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -14,6 +14,7 @@
       "HeadTrackingProcessor.cpp",
       "ModeSelector.cpp",
       "Pose.cpp",
+      "PoseBias.cpp",
       "PoseDriftCompensator.cpp",
       "PoseRateLimiter.cpp",
       "QuaternionUtil.cpp",
@@ -67,6 +68,7 @@
         "HeadTrackingProcessor-test.cpp",
         "ModeSelector-test.cpp",
         "Pose-test.cpp",
+        "PoseBias-test.cpp",
         "PoseDriftCompensator-test.cpp",
         "PoseRateLimiter-test.cpp",
         "QuaternionUtil-test.cpp",
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index f2f15df..71fae8a 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -17,7 +17,7 @@
 #include "media/HeadTrackingProcessor.h"
 
 #include "ModeSelector.h"
-#include "PoseDriftCompensator.h"
+#include "PoseBias.h"
 #include "QuaternionUtil.h"
 #include "ScreenHeadFusion.h"
 #include "StillnessDetector.h"
@@ -33,14 +33,6 @@
   public:
     HeadTrackingProcessorImpl(const Options& options, HeadTrackingMode initialMode)
         : mOptions(options),
-          mHeadPoseDriftCompensator(PoseDriftCompensator::Options{
-                  .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
-                  .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
-          }),
-          mScreenPoseDriftCompensator(PoseDriftCompensator::Options{
-                  .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
-                  .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
-          }),
           mHeadStillnessDetector(StillnessDetector::Options{
                   .defaultValue = false,
                   .windowDuration = options.autoRecenterWindowDuration,
@@ -65,7 +57,7 @@
                             const Twist3f& headTwist) override {
         Pose3f predictedWorldToHead =
                 worldToHead * integrate(headTwist, mOptions.predictionDuration);
-        mHeadPoseDriftCompensator.setInput(timestamp, predictedWorldToHead);
+        mHeadPoseBias.setInput(predictedWorldToHead);
         mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead);
         mWorldToHeadTimestamp = timestamp;
     }
@@ -78,7 +70,7 @@
         }
 
         Pose3f worldToLogicalScreen = worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle));
-        mScreenPoseDriftCompensator.setInput(timestamp, worldToLogicalScreen);
+        mScreenPoseBias.setInput(worldToLogicalScreen);
         mScreenStillnessDetector.setInput(timestamp, worldToLogicalScreen);
         mWorldToScreenTimestamp = timestamp;
     }
@@ -94,7 +86,7 @@
     void calculate(int64_t timestamp) override {
         // Handle the screen first, since it might trigger a recentering of the head.
         if (mWorldToScreenTimestamp.has_value()) {
-            const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput();
+            const Pose3f worldToLogicalScreen = mScreenPoseBias.getOutput();
             bool screenStable = mScreenStillnessDetector.calculate(timestamp);
             mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
             // Whenever the screen is unstable, recenter the head pose.
@@ -107,11 +99,11 @@
 
         // Handle head.
         if (mWorldToHeadTimestamp.has_value()) {
-            Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
+            Pose3f worldToHead = mHeadPoseBias.getOutput();
             // Auto-recenter.
             if (mHeadStillnessDetector.calculate(timestamp)) {
                 recenter(true, false);
-                worldToHead = mHeadPoseDriftCompensator.getOutput();
+                worldToHead = mHeadPoseBias.getOutput();
             }
 
             mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
@@ -142,11 +134,11 @@
 
     void recenter(bool recenterHead, bool recenterScreen) override {
         if (recenterHead) {
-            mHeadPoseDriftCompensator.recenter();
+            mHeadPoseBias.recenter();
             mHeadStillnessDetector.reset();
         }
         if (recenterScreen) {
-            mScreenPoseDriftCompensator.recenter();
+            mScreenPoseBias.recenter();
             mScreenStillnessDetector.reset();
         }
 
@@ -169,8 +161,8 @@
     std::optional<int64_t> mWorldToHeadTimestamp;
     std::optional<int64_t> mWorldToScreenTimestamp;
     Pose3f mHeadToStagePose;
-    PoseDriftCompensator mHeadPoseDriftCompensator;
-    PoseDriftCompensator mScreenPoseDriftCompensator;
+    PoseBias mHeadPoseBias;
+    PoseBias mScreenPoseBias;
     StillnessDetector mHeadStillnessDetector;
     StillnessDetector mScreenStillnessDetector;
     ScreenHeadFusion mScreenHeadFusion;
diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp
new file mode 100644
index 0000000..9f42a2c
--- /dev/null
+++ b/media/libheadtracking/PoseBias-test.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseBias.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(PoseBias, Initial) {
+    PoseBias bias;
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+}
+
+TEST(PoseBias, Basic) {
+    Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+    Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+
+    PoseBias bias;
+    bias.setInput(pose1);
+    EXPECT_EQ(pose1, bias.getOutput());
+    bias.recenter();
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+    bias.setInput(pose2);
+    EXPECT_EQ(bias.getOutput(), pose1.inverse() * pose2);
+    bias.recenter();
+    EXPECT_EQ(bias.getOutput(), Pose3f());
+}
+
+}  // namespace
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseBias.cpp b/media/libheadtracking/PoseBias.cpp
new file mode 100644
index 0000000..33afca6
--- /dev/null
+++ b/media/libheadtracking/PoseBias.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseBias.h"
+
+namespace android {
+namespace media {
+
+void PoseBias::setInput(const Pose3f& input) {
+    mLastWorldToInput = input;
+}
+
+void PoseBias::recenter() {
+    mBiasToWorld = mLastWorldToInput.inverse();
+}
+
+Pose3f PoseBias::getOutput() const {
+    return mBiasToWorld * mLastWorldToInput;
+}
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseBias.h b/media/libheadtracking/PoseBias.h
new file mode 100644
index 0000000..9acb49d
--- /dev/null
+++ b/media/libheadtracking/PoseBias.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Biasing for a stream of poses.
+ *
+ * This filter takes a stream of poses and at any time during the stream, can change the frame of
+ * reference for the stream to be that of the last pose received, via the recenter() operation.
+ *
+ * Typical usage:
+ * PoseBias bias;
+ *
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.setInput(...);
+ * output = bias.getOutput();
+ * bias.recenter();  // Reference frame is now equal to the last input.
+ * output = bias.getOutput();  // This is now the identity pose.
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls.
+ * The initial bias point is identity.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseBias {
+  public:
+    void setInput(const Pose3f& input);
+
+    void recenter();
+
+    Pose3f getOutput() const;
+
+  private:
+    Pose3f mLastWorldToInput;
+    Pose3f mBiasToWorld;
+};
+
+}  // namespace media
+}  // namespace android
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
index 2b4ea68..325b667 100644
--- a/media/libheadtracking/PoseProcessingGraph.png
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/README.md b/media/libheadtracking/README.md
index 5ec157b..44f7bb2 100644
--- a/media/libheadtracking/README.md
+++ b/media/libheadtracking/README.md
@@ -115,11 +115,9 @@
 #### World
 
 It is sometimes convenient to use an intermediate frame when dealing with
-head-to-screen transforms. The “world” frame is an arbitrary frame of reference
-in the physical world, relative to which we can measure the head pose and screen
-pose. In (very common) cases when we can’t establish such an absolute frame, we
-can take each measurement relative to a separate, arbitrary frame and high-pass
-the result.
+head-to-screen transforms. The “world” frame is a frame of reference in the
+physical world, relative to which we can measure the head pose and screen pose.
+It is arbitrary, but expected to be stable (fixed).
 
 ## Processing Description
 
@@ -133,15 +131,10 @@
 The Predictor block gets pose + twist (pose derivative) and extrapolates to
 obtain a predicted head pose (w/ given latency).
 
-### Drift / Bias Compensator
+### Bias
 
-The Drift / Bias Compensator blocks serve two purposes:
-
-- Compensate for floating reference axes by applying a high-pass filter, which
-  slowly pulls the pose toward identity.
-- Establish the reference frame for the poses by having the ability to set the
-  current pose as the reference for future poses (recentering). Effectively,
-  this is resetting the filter state to identity.
+The Bias blocks establish the reference frame for the poses by having the
+ability to set the current pose as the reference for future poses (recentering).
 
 ### Orientation Compensation
 
@@ -157,7 +150,7 @@
 module may indicate that the user is likely not in front of the screen via the
 “valid” output.
 
-## Stillness Detector
+### Stillness Detector
 
 The stillness detector blocks detect when their incoming pose stream has been
 stable for a given amount of time (allowing for a configurable amount of error).
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 2af560e..1744be3 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -38,8 +38,6 @@
     struct Options {
         float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
         float maxRotationalVelocity = std::numeric_limits<float>::infinity();
-        float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
-        float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
         int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
         float predictionDuration = 0;
         int64_t autoRecenterWindowDuration = std::numeric_limits<int64_t>::max();
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 440a7ff..ef0eb2a 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -40,20 +40,6 @@
 // This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
 constexpr float kMaxRotationalVelocity = 8;
 
-// This should be set to the typical time scale that the translation sensors used drift in. This
-// means, loosely, for how long we can trust the reading to be "accurate enough". This would
-// determine the time constants used for high-pass filtering those readings. If the value is set
-// too high, we may experience drift. If it is set too low, we may experience poses tending toward
-// identity too fast.
-constexpr auto kTranslationalDriftTimeConstant = 40s;
-
-// This should be set to the typical time scale that the rotation sensors used drift in. This
-// means, loosely, for how long we can trust the reading to be "accurate enough". This would
-// determine the time constants used for high-pass filtering those readings. If the value is set
-// too high, we may experience drift. If it is set too low, we may experience poses tending toward
-// identity too fast.
-constexpr auto kRotationalDriftTimeConstant = 60s;
-
 // This is how far into the future we predict the head pose, using linear extrapolation based on
 // twist (velocity). It should be set to a value that matches the characteristic durations of moving
 // one's head. The higher we set this, the more latency we are able to reduce, but setting this too
@@ -100,9 +86,6 @@
       mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
               .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
               .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
-              .translationalDriftTimeConstant =
-                      double(Ticks(kTranslationalDriftTimeConstant).count()),
-              .rotationalDriftTimeConstant = double(Ticks(kRotationalDriftTimeConstant).count()),
               .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
               .predictionDuration = Ticks(kPredictionDuration).count(),
               .autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(),