Merge "Add missing GLES 3.x metadata"
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index e33f099..349bbed 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -1336,22 +1336,14 @@
     return std::string(hash_buffer);
 }
 
-static void SendShellBroadcast(const std::string& action, const std::vector<std::string>& args) {
-    std::vector<std::string> am = {
-        "/system/bin/cmd", "activity", "broadcast", "--user", "0", "-a", action};
+static void SendBroadcast(const std::string& action, const std::vector<std::string>& args) {
+    // clang-format off
+    std::vector<std::string> am = {"/system/bin/cmd", "activity", "broadcast", "--user", "0",
+                    "--receiver-foreground", "--receiver-include-background", "-a", action};
+    // clang-format on
 
     am.insert(am.end(), args.begin(), args.end());
 
-    // TODO: explicity setting Shell's component to allow broadcast to launch it.
-    // That might break other components that are listening to the bugreport notifications
-    // (com.android.internal.intent.action.BUGREPORT_STARTED and
-    // com.android.internal.intent.action.BUGREPORT_STOPED), but
-    // those should be just handled by Shell anyways.
-    // A more generic alternative would be passing the -f 0x01000000 flag (or whatever
-    // value is defined by FLAG_RECEIVER_INCLUDE_BACKGROUND), but that would reset the
-    // --receiver-foreground option
-    am.push_back("com.android.shell");
-
     RunCommand("", am,
                CommandOptions::WithTimeout(20)
                    .Log("Sending broadcast: '%s'\n")
@@ -1594,14 +1586,14 @@
                 // clang-format off
 
                 std::vector<std::string> am_args = {
-                     "--receiver-permission", "android.permission.DUMP", "--receiver-foreground",
+                     "--receiver-permission", "android.permission.DUMP",
                      "--es", "android.intent.extra.NAME", ds.name_,
                      "--ei", "android.intent.extra.ID", std::to_string(ds.id_),
                      "--ei", "android.intent.extra.PID", std::to_string(ds.pid_),
                      "--ei", "android.intent.extra.MAX", std::to_string(ds.progress_->GetMax()),
                 };
                 // clang-format on
-                SendShellBroadcast("com.android.internal.intent.action.BUGREPORT_STARTED", am_args);
+                SendBroadcast("com.android.internal.intent.action.BUGREPORT_STARTED", am_args);
             }
             if (use_control_socket) {
                 dprintf(ds.control_socket_fd_, "BEGIN:%s\n", ds.path_.c_str());
@@ -1809,7 +1801,7 @@
             // clang-format off
 
             std::vector<std::string> am_args = {
-                 "--receiver-permission", "android.permission.DUMP", "--receiver-foreground",
+                 "--receiver-permission", "android.permission.DUMP",
                  "--ei", "android.intent.extra.ID", std::to_string(ds.id_),
                  "--ei", "android.intent.extra.PID", std::to_string(ds.pid_),
                  "--ei", "android.intent.extra.MAX", std::to_string(ds.progress_->GetMax()),
@@ -1826,10 +1818,10 @@
                 am_args.push_back("--es");
                 am_args.push_back("android.intent.extra.REMOTE_BUGREPORT_HASH");
                 am_args.push_back(SHA256_file_hash(ds.path_));
-                SendShellBroadcast("com.android.internal.intent.action.REMOTE_BUGREPORT_FINISHED",
-                                   am_args);
+                SendBroadcast("com.android.internal.intent.action.REMOTE_BUGREPORT_FINISHED",
+                              am_args);
             } else {
-                SendShellBroadcast("com.android.internal.intent.action.BUGREPORT_FINISHED", am_args);
+                SendBroadcast("com.android.internal.intent.action.BUGREPORT_FINISHED", am_args);
             }
         } else {
             MYLOGE("Skipping finished broadcast because bugreport could not be generated\n");
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 535d060..aa10666 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -2014,5 +2014,11 @@
     return mQuotaDevices[path];
 }
 
+binder::Status InstalldNativeService::isQuotaSupported(
+        const std::unique_ptr<std::string>& volumeUuid, bool* _aidl_return) {
+    *_aidl_return = !findQuotaDeviceForUuid(volumeUuid).empty();
+    return ok();
+}
+
 }  // namespace installd
 }  // namespace android
diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h
index b3dbaf4..feb2219 100644
--- a/cmds/installd/InstalldNativeService.h
+++ b/cmds/installd/InstalldNativeService.h
@@ -111,6 +111,8 @@
         const std::unique_ptr<std::string>& volumeUuid, int32_t storage_flag, bool* _aidl_return);
 
     binder::Status invalidateMounts();
+    binder::Status isQuotaSupported(const std::unique_ptr<std::string>& volumeUuid,
+            bool* _aidl_return);
 
 private:
     std::recursive_mutex mLock;
diff --git a/cmds/installd/binder/android/os/IInstalld.aidl b/cmds/installd/binder/android/os/IInstalld.aidl
index b45df87..4195a01 100644
--- a/cmds/installd/binder/android/os/IInstalld.aidl
+++ b/cmds/installd/binder/android/os/IInstalld.aidl
@@ -77,4 +77,5 @@
         int storage_flag);
 
     void invalidateMounts();
+    boolean isQuotaSupported(@nullable @utf8InCpp String uuid);
 }
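Illustration only, not part of the change: a native client holding the AIDL-generated IInstalld proxy could call the new method as sketched below. The generated header path and the helper shown are assumptions based on standard AIDL C++ codegen; the method signature mirrors the server-side declaration above.

    #include <memory>
    #include <string>

    #include <android/os/IInstalld.h>    // assumed AIDL-generated proxy header
    #include <utils/StrongPointer.h>

    // Returns true only when the call succeeds and a quota device backs the volume.
    static bool IsQuotaSupported(const android::sp<android::os::IInstalld>& installd,
                                 const std::unique_ptr<std::string>& volumeUuid) {
        bool supported = false;
        android::binder::Status status =
                installd->isQuotaSupported(volumeUuid, &supported);
        return status.isOk() && supported;
    }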
diff --git a/include/ui/ANativeObjectBase.h b/include/ui/ANativeObjectBase.h
index 76e850f..640e34b 100644
--- a/include/ui/ANativeObjectBase.h
+++ b/include/ui/ANativeObjectBase.h
@@ -18,9 +18,7 @@
 #define ANDROID_ANDROID_NATIVES_H
 
 #include <sys/types.h>
-#include <string.h>
 
-#include <hardware/gralloc.h>
 #include <system/window.h>
 
 // ---------------------------------------------------------------------------
diff --git a/include/ui/ColorSpace.h b/include/ui/ColorSpace.h
index e9260b5..8c4acb7 100644
--- a/include/ui/ColorSpace.h
+++ b/include/ui/ColorSpace.h
@@ -23,10 +23,10 @@
 #include <memory>
 #include <string>
 
-#include <ui/mat3.h>
-#include <ui/scalar.h>
-#include <ui/vec2.h>
-#include <ui/vec3.h>
+#include <math/mat3.h>
+#include <math/scalar.h>
+#include <math/vec2.h>
+#include <math/vec3.h>
 
 namespace android {
 
diff --git a/include/ui/DisplayInfo.h b/include/ui/DisplayInfo.h
index 842806e..94caf6b 100644
--- a/include/ui/DisplayInfo.h
+++ b/include/ui/DisplayInfo.h
@@ -19,9 +19,8 @@
 
 #include <stdint.h>
 #include <sys/types.h>
-#include <utils/Timers.h>
 
-#include <ui/PixelFormat.h>
+#include <utils/Timers.h>
 
 namespace android {
 
diff --git a/include/ui/Gralloc1.h b/include/ui/Gralloc1.h
index 640e29c..90713b3 100644
--- a/include/ui/Gralloc1.h
+++ b/include/ui/Gralloc1.h
@@ -19,10 +19,17 @@
 
 #define GRALLOC1_LOG_TAG "Gralloc1"
 
-#include <ui/Gralloc1On0Adapter.h>
-
+#include <functional>
+#include <memory>
 #include <unordered_set>
 
+#include <log/log.h>
+
+#include <ui/Fence.h>
+
+#include <hardware/gralloc1.h>
+
+
 namespace std {
     template <>
     struct hash<gralloc1_capability_t> {
@@ -33,10 +40,42 @@
 }
 
 namespace android {
-
+class GraphicBuffer;
 class Fence;
 class GraphicBuffer;
+class Gralloc1On0Adapter;
+} // namespace android
 
+
+// This is not an "official" capability (i.e., it is not found in gralloc1.h),
+// but we will use it to detect that we are running through the adapter, which
+// is capable of collaborating with GraphicBuffer such that queries on a
+// buffer_handle_t succeed
+static const auto GRALLOC1_CAPABILITY_ON_ADAPTER =
+        static_cast<gralloc1_capability_t>(GRALLOC1_LAST_CAPABILITY + 1);
+
+static const auto GRALLOC1_FUNCTION_RETAIN_GRAPHIC_BUFFER =
+        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 1);
+static const auto GRALLOC1_FUNCTION_ALLOCATE_WITH_ID =
+        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 2);
+static const auto GRALLOC1_FUNCTION_LOCK_YCBCR =
+        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 3);
+static const auto GRALLOC1_LAST_ADAPTER_FUNCTION = GRALLOC1_FUNCTION_LOCK_YCBCR;
+
+typedef gralloc1_error_t (*GRALLOC1_PFN_RETAIN_GRAPHIC_BUFFER)(
+        gralloc1_device_t* device, const android::GraphicBuffer* buffer);
+typedef gralloc1_error_t (*GRALLOC1_PFN_ALLOCATE_WITH_ID)(
+        gralloc1_device_t* device, gralloc1_buffer_descriptor_t descriptor,
+        gralloc1_backing_store_t id, buffer_handle_t* outBuffer);
+typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_LOCK_YCBCR)(
+        gralloc1_device_t* device, buffer_handle_t buffer,
+        uint64_t /*gralloc1_producer_usage_t*/ producerUsage,
+        uint64_t /*gralloc1_consumer_usage_t*/ consumerUsage,
+        const gralloc1_rect_t* accessRegion, struct android_ycbcr* outYCbCr,
+        int32_t acquireFence);
+
+
+namespace android {
 namespace Gralloc1 {
 
 class Device;
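Sketch (not part of the patch) of how the synthetic GRALLOC1_CAPABILITY_ON_ADAPTER constant moved into this header can be used to detect the adapter. The Gralloc1::Device wrapper in this header does essentially this internally; the shape of the gralloc1_device_t::getCapabilities call is an assumption based on hardware/gralloc1.h.

    #include <vector>

    #include <ui/Gralloc1.h>

    static bool RunningOnAdapter(gralloc1_device_t* device) {
        // First call queries the count, second call fills the list.
        uint32_t count = 0;
        device->getCapabilities(device, &count, nullptr);
        std::vector<int32_t> capabilities(count);
        device->getCapabilities(device, &count, capabilities.data());
        for (int32_t capability : capabilities) {
            if (capability == GRALLOC1_CAPABILITY_ON_ADAPTER) {
                return true;
            }
        }
        return false;
    }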
diff --git a/include/ui/Gralloc1On0Adapter.h b/include/ui/Gralloc1On0Adapter.h
index b09fdc6..6379a08 100644
--- a/include/ui/Gralloc1On0Adapter.h
+++ b/include/ui/Gralloc1On0Adapter.h
@@ -35,33 +35,6 @@
 
 struct gralloc_module_t;
 
-// This is not an "official" capability (i.e., it is not found in gralloc1.h),
-// but we will use it to detect that we are running through the adapter, which
-// is capable of collaborating with GraphicBuffer such that queries on a
-// buffer_handle_t succeed
-static const auto GRALLOC1_CAPABILITY_ON_ADAPTER =
-        static_cast<gralloc1_capability_t>(GRALLOC1_LAST_CAPABILITY + 1);
-
-static const auto GRALLOC1_FUNCTION_RETAIN_GRAPHIC_BUFFER =
-        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 1);
-static const auto GRALLOC1_FUNCTION_ALLOCATE_WITH_ID =
-        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 2);
-static const auto GRALLOC1_FUNCTION_LOCK_YCBCR =
-        static_cast<gralloc1_function_descriptor_t>(GRALLOC1_LAST_FUNCTION + 3);
-static const auto GRALLOC1_LAST_ADAPTER_FUNCTION = GRALLOC1_FUNCTION_LOCK_YCBCR;
-
-typedef gralloc1_error_t (*GRALLOC1_PFN_RETAIN_GRAPHIC_BUFFER)(
-        gralloc1_device_t* device, const android::GraphicBuffer* buffer);
-typedef gralloc1_error_t (*GRALLOC1_PFN_ALLOCATE_WITH_ID)(
-        gralloc1_device_t* device, gralloc1_buffer_descriptor_t descriptor,
-        gralloc1_backing_store_t id, buffer_handle_t* outBuffer);
-typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_LOCK_YCBCR)(
-        gralloc1_device_t* device, buffer_handle_t buffer,
-        uint64_t /*gralloc1_producer_usage_t*/ producerUsage,
-        uint64_t /*gralloc1_consumer_usage_t*/ consumerUsage,
-        const gralloc1_rect_t* accessRegion, struct android_ycbcr* outYCbCr,
-        int32_t acquireFence);
-
 namespace android {
 
 class Gralloc1On0Adapter : public gralloc1_device_t
diff --git a/include/ui/GrallocMapper.h b/include/ui/GrallocMapper.h
index 5a23b68..5a0d64b 100644
--- a/include/ui/GrallocMapper.h
+++ b/include/ui/GrallocMapper.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_UI_GRALLOC_MAPPER_H
 #define ANDROID_UI_GRALLOC_MAPPER_H
 
-#include <memory>
-
 #include <android/hardware/graphics/mapper/2.0/IMapper.h>
 #include <system/window.h>
 
diff --git a/include/ui/GraphicBuffer.h b/include/ui/GraphicBuffer.h
index 759c9ec..040d1e7 100644
--- a/include/ui/GraphicBuffer.h
+++ b/include/ui/GraphicBuffer.h
@@ -20,13 +20,15 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <string>
+
 #include <ui/ANativeObjectBase.h>
 #include <ui/PixelFormat.h>
 #include <ui/Rect.h>
 #include <utils/Flattenable.h>
 #include <utils/RefBase.h>
 
-#include <string>
+#include <hardware/gralloc.h>
 
 struct ANativeWindowBuffer;
 
diff --git a/include/ui/GraphicBufferAllocator.h b/include/ui/GraphicBufferAllocator.h
index 2ccc44b..e97122b 100644
--- a/include/ui/GraphicBufferAllocator.h
+++ b/include/ui/GraphicBufferAllocator.h
@@ -20,11 +20,14 @@
 
 #include <stdint.h>
 
+#include <memory>
+#include <string>
+
 #include <cutils/native_handle.h>
 
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
-#include <utils/threads.h>
+#include <utils/Mutex.h>
 #include <utils/Singleton.h>
 
 #include <ui/Gralloc1.h>
@@ -36,7 +39,6 @@
 class Allocator;
 }
 
-class Gralloc1Loader;
 class GraphicBufferMapper;
 class String8;
 
diff --git a/include/ui/GraphicBufferMapper.h b/include/ui/GraphicBufferMapper.h
index 001769f..b6d4021 100644
--- a/include/ui/GraphicBufferMapper.h
+++ b/include/ui/GraphicBufferMapper.h
@@ -20,10 +20,18 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <memory>
+
 #include <ui/Gralloc1.h>
 
 #include <utils/Singleton.h>
 
+
+// Needed by code that still uses the GRALLOC_USAGE_* constants.
+// when/if we get rid of gralloc, we should provide aliases or fix call sites.
+#include <hardware/gralloc.h>
+
+
 namespace android {
 
 // ---------------------------------------------------------------------------
diff --git a/libs/math/Android.bp b/libs/math/Android.bp
new file mode 100644
index 0000000..3ef8b4a
--- /dev/null
+++ b/libs/math/Android.bp
@@ -0,0 +1,21 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_static {
+    name: "libmath",
+    host_supported: true,
+    export_include_dirs: ["include"],
+}
+
+subdirs = ["tests"]
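A minimal usage sketch (illustrative, not from the change): libmath only exports its include/ directory, so existing vector code keeps compiling once the include prefix changes from ui/ to math/.

    #include <math/vec3.h>

    static float Luma(const android::vec3& rgb) {
        // android::vec3 is the same type formerly reached via <ui/vec3.h>.
        return 0.2126f * rgb.x + 0.7152f * rgb.y + 0.0722f * rgb.z;
    }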
diff --git a/libs/math/MODULE_LICENSE_APACHE2 b/libs/math/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/libs/math/MODULE_LICENSE_APACHE2
diff --git a/libs/math/NOTICE b/libs/math/NOTICE
new file mode 100644
index 0000000..c5b1efa
--- /dev/null
+++ b/libs/math/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2005-2008, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/include/ui/TMatHelpers.h b/libs/math/include/math/TMatHelpers.h
similarity index 98%
rename from include/ui/TMatHelpers.h
rename to libs/math/include/math/TMatHelpers.h
index 8edf5f8..478e702 100644
--- a/include/ui/TMatHelpers.h
+++ b/libs/math/include/math/TMatHelpers.h
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef UI_TMATHELPERS_H_
-#define UI_TMATHELPERS_H_
+#pragma once
 
 #include <math.h>
 #include <stdint.h>
@@ -26,8 +25,8 @@
 #include <iomanip>
 #include <stdexcept>
 
-#include <ui/quat.h>
-#include <ui/TVecHelpers.h>
+#include <math/quat.h>
+#include <math/TVecHelpers.h>
 
 #include  <utils/String8.h>
 
@@ -636,5 +635,3 @@
 #undef UNLIKELY
 #undef PURE
 #undef CONSTEXPR
-
-#endif  // UI_TMATHELPERS_H_
diff --git a/include/ui/TQuatHelpers.h b/libs/math/include/math/TQuatHelpers.h
similarity index 98%
rename from include/ui/TQuatHelpers.h
rename to libs/math/include/math/TQuatHelpers.h
index 2f0f70f..f0a71ae 100644
--- a/include/ui/TQuatHelpers.h
+++ b/libs/math/include/math/TQuatHelpers.h
@@ -15,8 +15,7 @@
  */
 
 
-#ifndef UI_TQUATHELPERS_H_
-#define UI_TQUATHELPERS_H_
+#pragma once
 
 #include <math.h>
 #include <stdint.h>
@@ -24,7 +23,7 @@
 
 #include <iostream>
 
-#include <ui/vec3.h>
+#include <math/vec3.h>
 
 #define PURE __attribute__((pure))
 
@@ -299,6 +298,3 @@
 // -------------------------------------------------------------------------------------
 }  // namespace details
 }  // namespace android
-
-
-#endif  // UI_TQUATHELPERS_H_
diff --git a/include/ui/TVecHelpers.h b/libs/math/include/math/TVecHelpers.h
similarity index 99%
rename from include/ui/TVecHelpers.h
rename to libs/math/include/math/TVecHelpers.h
index 1884608..20f852f 100644
--- a/include/ui/TVecHelpers.h
+++ b/libs/math/include/math/TVecHelpers.h
@@ -15,8 +15,7 @@
  */
 
 
-#ifndef UI_TVECHELPERS_H_
-#define UI_TVECHELPERS_H_
+#pragma once
 
 #include <math.h>
 #include <stdint.h>
@@ -607,6 +606,3 @@
 // -------------------------------------------------------------------------------------
 }  // namespace details
 }  // namespace android
-
-
-#endif  // UI_TVECHELPERS_H_
diff --git a/include/ui/half.h b/libs/math/include/math/half.h
similarity index 98%
rename from include/ui/half.h
rename to libs/math/include/math/half.h
index 7a271dc..3ca8bd1 100644
--- a/include/ui/half.h
+++ b/libs/math/include/math/half.h
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef UI_HALF_H
-#define UI_HALF_H
+#pragma once
 
 #include <stdint.h>
 #include <iosfwd>
@@ -204,5 +203,3 @@
 #undef LIKELY
 #undef UNLIKELY
 #undef CONSTEXPR
-
-#endif // UI_HALF_H
diff --git a/include/ui/mat2.h b/libs/math/include/math/mat2.h
similarity index 98%
rename from include/ui/mat2.h
rename to libs/math/include/math/mat2.h
index 37c7221..3e6cd4c 100644
--- a/include/ui/mat2.h
+++ b/libs/math/include/math/mat2.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef UI_MAT2_H_
-#define UI_MAT2_H_
+#pragma once
 
-#include <ui/TMatHelpers.h>
-#include <ui/vec2.h>
+#include <math/TMatHelpers.h>
+#include <math/vec2.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -376,5 +375,3 @@
 
 #undef PURE
 #undef CONSTEXPR
-
-#endif  // UI_MAT2_H_
diff --git a/include/ui/mat3.h b/libs/math/include/math/mat3.h
similarity index 98%
rename from include/ui/mat3.h
rename to libs/math/include/math/mat3.h
index 4f5dba9..5c8a9b2 100644
--- a/include/ui/mat3.h
+++ b/libs/math/include/math/mat3.h
@@ -14,12 +14,11 @@
  * limitations under the License.
  */
 
-#ifndef UI_MAT3_H_
-#define UI_MAT3_H_
+#pragma once
 
-#include <ui/quat.h>
-#include <ui/TMatHelpers.h>
-#include <ui/vec3.h>
+#include <math/quat.h>
+#include <math/TMatHelpers.h>
+#include <math/vec3.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -439,5 +438,3 @@
 
 #undef PURE
 #undef CONSTEXPR
-
-#endif  // UI_MAT3_H_
diff --git a/include/ui/mat4.h b/libs/math/include/math/mat4.h
similarity index 98%
rename from include/ui/mat4.h
rename to libs/math/include/math/mat4.h
index f63d40a..6119ba7 100644
--- a/include/ui/mat4.h
+++ b/libs/math/include/math/mat4.h
@@ -14,14 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef UI_MAT4_H_
-#define UI_MAT4_H_
+#pragma once
 
-#include <ui/mat3.h>
-#include <ui/quat.h>
-#include <ui/TMatHelpers.h>
-#include <ui/vec3.h>
-#include <ui/vec4.h>
+#include <math/mat3.h>
+#include <math/quat.h>
+#include <math/TMatHelpers.h>
+#include <math/vec3.h>
+#include <math/vec4.h>
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -585,5 +584,3 @@
 
 #undef PURE
 #undef CONSTEXPR
-
-#endif  // UI_MAT4_H_
diff --git a/include/ui/quat.h b/libs/math/include/math/quat.h
similarity index 97%
rename from include/ui/quat.h
rename to libs/math/include/math/quat.h
index 5b8cd8b..1936a2b 100644
--- a/include/ui/quat.h
+++ b/libs/math/include/math/quat.h
@@ -14,13 +14,12 @@
  * limitations under the License.
  */
 
-#ifndef UI_QUAT_H_
-#define UI_QUAT_H_
+#pragma once
 
-#include <ui/half.h>
-#include <ui/TQuatHelpers.h>
-#include <ui/vec3.h>
-#include <ui/vec4.h>
+#include <math/half.h>
+#include <math/TQuatHelpers.h>
+#include <math/vec3.h>
+#include <math/vec4.h>
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -191,5 +190,3 @@
 #pragma clang diagnostic pop
 
 #undef PURE
-
-#endif  // UI_QUAT_H_
diff --git a/include/ui/scalar.h b/libs/math/include/math/scalar.h
similarity index 94%
rename from include/ui/scalar.h
rename to libs/math/include/math/scalar.h
index 5f8329e..2eced92 100644
--- a/include/ui/scalar.h
+++ b/libs/math/include/math/scalar.h
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#ifndef UI_SCALAR_H
-#define UI_SCALAR_H
+#pragma once
 
 #include <algorithm>
 #include <cmath>
@@ -43,5 +42,3 @@
 }
 
 } // namespace std
-
-#endif // UI_SCALAR_H
diff --git a/include/ui/vec2.h b/libs/math/include/math/vec2.h
similarity index 96%
rename from include/ui/vec2.h
rename to libs/math/include/math/vec2.h
index 308d2b8..a347633 100644
--- a/include/ui/vec2.h
+++ b/libs/math/include/math/vec2.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef UI_VEC2_H_
-#define UI_VEC2_H_
+#pragma once
 
-#include <ui/TVecHelpers.h>
-#include <ui/half.h>
+#include <math/TVecHelpers.h>
+#include <math/half.h>
 #include <assert.h>
 #include <stdint.h>
 #include <sys/types.h>
@@ -124,5 +123,3 @@
 }  // namespace android
 
 #pragma clang diagnostic pop
-
-#endif  // UI_VEC2_H_
diff --git a/include/ui/vec3.h b/libs/math/include/math/vec3.h
similarity index 97%
rename from include/ui/vec3.h
rename to libs/math/include/math/vec3.h
index e3a6d14..009fd84 100644
--- a/include/ui/vec3.h
+++ b/libs/math/include/math/vec3.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef UI_VEC3_H_
-#define UI_VEC3_H_
+#pragma once
 
-#include <ui/vec2.h>
-#include <ui/half.h>
+#include <math/vec2.h>
+#include <math/half.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -130,5 +129,3 @@
 }  // namespace android
 
 #pragma clang diagnostic pop
-
-#endif  // UI_VEC3_H_
diff --git a/include/ui/vec4.h b/libs/math/include/math/vec4.h
similarity index 96%
rename from include/ui/vec4.h
rename to libs/math/include/math/vec4.h
index 9346fb3..1e279fe 100644
--- a/include/ui/vec4.h
+++ b/libs/math/include/math/vec4.h
@@ -14,11 +14,10 @@
  * limitations under the License.
  */
 
-#ifndef UI_VEC4_H_
-#define UI_VEC4_H_
+#pragma once
 
-#include <ui/vec3.h>
-#include <ui/half.h>
+#include <math/vec3.h>
+#include <math/half.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -127,5 +126,3 @@
 }  // namespace android
 
 #pragma clang diagnostic pop
-
-#endif  // UI_VEC4_H_
diff --git a/libs/math/tests/Android.bp b/libs/math/tests/Android.bp
new file mode 100644
index 0000000..0ed24a2
--- /dev/null
+++ b/libs/math/tests/Android.bp
@@ -0,0 +1,39 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+    name: "vec_test",
+    srcs: ["vec_test.cpp"],
+    static_libs: ["libmath"],
+}
+
+cc_test {
+    name: "mat_test",
+    srcs: ["mat_test.cpp"],
+    static_libs: ["libmath"],
+}
+
+cc_test {
+    name: "half_test",
+    srcs: ["half_test.cpp"],
+    static_libs: ["libmath"],
+}
+
+cc_test {
+    name: "quat_test",
+    srcs: ["quat_test.cpp"],
+    static_libs: ["libmath"],
+}
diff --git a/libs/ui/tests/half_test.cpp b/libs/math/tests/half_test.cpp
similarity index 98%
rename from libs/ui/tests/half_test.cpp
rename to libs/math/tests/half_test.cpp
index b2a5e5c..496a7ef 100644
--- a/libs/ui/tests/half_test.cpp
+++ b/libs/math/tests/half_test.cpp
@@ -19,8 +19,8 @@
 #include <math.h>
 #include <stdlib.h>
 
-#include <ui/half.h>
-#include <ui/vec4.h>
+#include <math/half.h>
+#include <math/vec4.h>
 
 #include <gtest/gtest.h>
 
diff --git a/libs/ui/tests/mat_test.cpp b/libs/math/tests/mat_test.cpp
similarity index 99%
rename from libs/ui/tests/mat_test.cpp
rename to libs/math/tests/mat_test.cpp
index 0f8e631..c365366 100644
--- a/libs/ui/tests/mat_test.cpp
+++ b/libs/math/tests/mat_test.cpp
@@ -24,8 +24,8 @@
 
 #include <gtest/gtest.h>
 
-#include <ui/mat2.h>
-#include <ui/mat4.h>
+#include <math/mat2.h>
+#include <math/mat4.h>
 
 namespace android {
 
diff --git a/libs/ui/tests/quat_test.cpp b/libs/math/tests/quat_test.cpp
similarity index 98%
rename from libs/ui/tests/quat_test.cpp
rename to libs/math/tests/quat_test.cpp
index f5cb659..c20771e 100644
--- a/libs/ui/tests/quat_test.cpp
+++ b/libs/math/tests/quat_test.cpp
@@ -22,10 +22,10 @@
 #include <random>
 #include <functional>
 
-#include <ui/quat.h>
-#include <ui/mat4.h>
-#include <ui/vec3.h>
-#include <ui/vec4.h>
+#include <math/quat.h>
+#include <math/mat4.h>
+#include <math/vec3.h>
+#include <math/vec4.h>
 
 #include <gtest/gtest.h>
 
diff --git a/libs/ui/tests/vec_test.cpp b/libs/math/tests/vec_test.cpp
similarity index 99%
rename from libs/ui/tests/vec_test.cpp
rename to libs/math/tests/vec_test.cpp
index 7c749a7..79ae2e4 100644
--- a/libs/ui/tests/vec_test.cpp
+++ b/libs/math/tests/vec_test.cpp
@@ -19,7 +19,7 @@
 #include <math.h>
 #include <stdlib.h>
 
-#include <ui/vec4.h>
+#include <math/vec4.h>
 
 #include <gtest/gtest.h>
 
diff --git a/libs/ui/Android.bp b/libs/ui/Android.bp
index ea18644..d1bfa18 100644
--- a/libs/ui/Android.bp
+++ b/libs/ui/Android.bp
@@ -78,9 +78,13 @@
 
     static_libs: [
         "libarect",
+        "libmath",
     ],
 
-    export_static_lib_headers: ["libarect"],
+    export_static_lib_headers: [
+        "libarect",
+        "libmath",
+    ],
 }
 
 subdirs = ["tests"]
diff --git a/libs/ui/Gralloc1.cpp b/libs/ui/Gralloc1.cpp
index 7952ed6..64a8b40 100644
--- a/libs/ui/Gralloc1.cpp
+++ b/libs/ui/Gralloc1.cpp
@@ -18,6 +18,7 @@
 
 #include <ui/Gralloc1.h>
 #include <ui/GraphicBuffer.h>
+#include <ui/Gralloc1On0Adapter.h>
 
 #include <vector>
 
diff --git a/libs/ui/Gralloc1On0Adapter.cpp b/libs/ui/Gralloc1On0Adapter.cpp
index 4cc0e4b..b8bc6c4 100644
--- a/libs/ui/Gralloc1On0Adapter.cpp
+++ b/libs/ui/Gralloc1On0Adapter.cpp
@@ -18,10 +18,13 @@
 #define LOG_TAG "Gralloc1On0Adapter"
 //#define LOG_NDEBUG 0
 
+#include <ui/Gralloc1On0Adapter.h>
+
+
 #include <hardware/gralloc.h>
 
-#include <ui/Gralloc1On0Adapter.h>
 #include <ui/GraphicBuffer.h>
+#include <ui/Gralloc1.h>
 
 #include <utils/Log.h>
 
diff --git a/libs/ui/GrallocAllocator.cpp b/libs/ui/GrallocAllocator.cpp
index ca67990..5c5d5b3 100644
--- a/libs/ui/GrallocAllocator.cpp
+++ b/libs/ui/GrallocAllocator.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "GrallocAllocator"
 
-#include <log/log.h>
 #include <ui/GrallocAllocator.h>
 
+#include <log/log.h>
+
 namespace android {
 
 namespace Gralloc2 {
diff --git a/libs/ui/GrallocMapper.cpp b/libs/ui/GrallocMapper.cpp
index b9e9040..6884dcb 100644
--- a/libs/ui/GrallocMapper.cpp
+++ b/libs/ui/GrallocMapper.cpp
@@ -16,11 +16,9 @@
 
 #define LOG_TAG "GrallocMapper"
 
-#include <array>
-#include <string>
+#include <ui/GrallocMapper.h>
 
 #include <log/log.h>
-#include <ui/GrallocMapper.h>
 
 namespace android {
 
diff --git a/libs/ui/GraphicBuffer.cpp b/libs/ui/GraphicBuffer.cpp
index b544426..37ebfb3 100644
--- a/libs/ui/GraphicBuffer.cpp
+++ b/libs/ui/GraphicBuffer.cpp
@@ -16,18 +16,10 @@
 
 #define LOG_TAG "GraphicBuffer"
 
-#include <stdlib.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/Errors.h>
-#include <utils/Log.h>
-
-#include <ui/GrallocMapper.h>
 #include <ui/GraphicBuffer.h>
+#include <ui/GrallocMapper.h>
 #include <ui/GraphicBufferAllocator.h>
 #include <ui/GraphicBufferMapper.h>
-#include <ui/PixelFormat.h>
 
 namespace android {
 
diff --git a/libs/ui/GraphicBufferAllocator.cpp b/libs/ui/GraphicBufferAllocator.cpp
index b14110e..3f18bbc 100644
--- a/libs/ui/GraphicBufferAllocator.cpp
+++ b/libs/ui/GraphicBufferAllocator.cpp
@@ -18,13 +18,15 @@
 #define LOG_TAG "GraphicBufferAllocator"
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 
+#include <ui/GraphicBufferAllocator.h>
+
+#include <stdio.h>
+
 #include <log/log.h>
 #include <utils/Singleton.h>
 #include <utils/String8.h>
 #include <utils/Trace.h>
 
-#include <ui/GraphicBufferAllocator.h>
-#include <ui/Gralloc1On0Adapter.h>
 #include <ui/GrallocAllocator.h>
 #include <ui/GrallocMapper.h>
 #include <ui/GraphicBufferMapper.h>
diff --git a/libs/ui/GraphicBufferMapper.cpp b/libs/ui/GraphicBufferMapper.cpp
index f418f7f..656472f 100644
--- a/libs/ui/GraphicBufferMapper.cpp
+++ b/libs/ui/GraphicBufferMapper.cpp
@@ -18,8 +18,7 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 
-#include <stdint.h>
-#include <errno.h>
+#include <ui/GraphicBufferMapper.h>
 
 // We would eliminate the non-conforming zero-length array, but we can't since
 // this is effectively included from the Linux kernel
@@ -28,13 +27,10 @@
 #include <sync/sync.h>
 #pragma clang diagnostic pop
 
-#include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
-#include <ui/Gralloc1On0Adapter.h>
 #include <ui/GrallocMapper.h>
-#include <ui/GraphicBufferMapper.h>
 #include <ui/GraphicBuffer.h>
 
 #include <system/graphics.h>
diff --git a/libs/ui/tests/Android.bp b/libs/ui/tests/Android.bp
index c4f34d5..6733505 100644
--- a/libs/ui/tests/Android.bp
+++ b/libs/ui/tests/Android.bp
@@ -21,26 +21,6 @@
 }
 
 cc_test {
-    name: "vec_test",
-    srcs: ["vec_test.cpp"],
-}
-
-cc_test {
-    name: "mat_test",
-    srcs: ["mat_test.cpp"],
-}
-
-cc_test {
-    name: "half_test",
-    srcs: ["half_test.cpp"],
-}
-
-cc_test {
-    name: "quat_test",
-    srcs: ["quat_test.cpp"],
-}
-
-cc_test {
     name: "colorspace_test",
     shared_libs: ["libui"],
     srcs: ["colorspace_test.cpp"],
diff --git a/libs/vr/libdvrgraphics/Android.mk b/libs/vr/libdvrgraphics/Android.mk
index b95b18e..b9e601c 100644
--- a/libs/vr/libdvrgraphics/Android.mk
+++ b/libs/vr/libdvrgraphics/Android.mk
@@ -29,6 +29,9 @@
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(includeFiles)
 LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
 LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+# Rather than add this header-file-only library to all users of libdvrgraphics,
+# include it here.
+LOCAL_WHOLE_STATIC_LIBRARIES := libarect
 LOCAL_MODULE := libdvrgraphics
 include $(BUILD_STATIC_LIBRARY)
 
diff --git a/libs/vr/libposepredictor/Android.mk b/libs/vr/libposepredictor/Android.mk
index 761fe06..2217819 100755
--- a/libs/vr/libposepredictor/Android.mk
+++ b/libs/vr/libposepredictor/Android.mk
@@ -15,18 +15,18 @@
 LOCAL_PATH := $(call my-dir)
 
 sourceFiles := \
-        pose_predictor.cpp \
+        predictor.cpp \
         buffered_predictor.cpp \
-        linear_pose_predictor.cpp \
-        polynomial_pose_predictor.cpp \
+        linear_predictor.cpp \
+        polynomial_predictor.cpp \
+        dvr_pose_predictor.cpp \
 
 includeFiles := \
-        $(LOCAL_PATH)/include
+        $(LOCAL_PATH)/include \
+        external/eigen \
 
 staticLibraries := \
-        libdvrcommon \
         libsensor \
-        libpdx_default_transport \
 
 sharedLibraries := \
 
@@ -42,13 +42,12 @@
 LOCAL_MODULE := libposepredictor
 include $(BUILD_STATIC_LIBRARY)
 
-
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := optional
 LOCAL_SRC_FILES := \
-        pose_predictor_tests.cpp \
-        linear_pose_predictor_tests.cpp \
-        polynomial_pose_predictor_tests.cpp \
+        predictor_tests.cpp \
+        linear_predictor_tests.cpp \
+        polynomial_predictor_tests.cpp \
 
 LOCAL_STATIC_LIBRARIES := libposepredictor $(staticLibraries)
 LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
diff --git a/libs/vr/libposepredictor/buffered_predictor.cpp b/libs/vr/libposepredictor/buffered_predictor.cpp
index 08fd524..f3b41dc 100644
--- a/libs/vr/libposepredictor/buffered_predictor.cpp
+++ b/libs/vr/libposepredictor/buffered_predictor.cpp
@@ -1,13 +1,12 @@
-#include <private/dvr/buffered_predictor.h>
+#include <buffered_predictor.h>
 
-namespace android {
-namespace dvr {
+namespace posepredictor {
 
 BufferedPredictor::BufferedPredictor(size_t buffer_size) {
   buffer_.resize(buffer_size);
 }
 
-void BufferedPredictor::BufferSample(const Sample& sample) {
+void BufferedPredictor::BufferSample(const Pose& sample) {
   const auto& prev_sample = buffer_[current_pose_index_];
 
   // If we are updating a sample (the same time stamp), do not advance the
@@ -22,19 +21,18 @@
   if (PrevSample(1).orientation.coeffs().dot(sample.orientation.coeffs()) < 0) {
     // Flip the quaternion to be closest to the previous sample.
     buffer_[current_pose_index_].orientation =
-        quatd(-sample.orientation.w(), -sample.orientation.x(),
-              -sample.orientation.y(), -sample.orientation.z());
+        quat(-sample.orientation.w(), -sample.orientation.x(),
+             -sample.orientation.y(), -sample.orientation.z());
   }
 
   ++num_poses_added_;
 }
 
-const PosePredictor::Sample& BufferedPredictor::PrevSample(size_t index) const {
+const Pose& BufferedPredictor::PrevSample(size_t index) const {
   // We must not request a pose too far in the past.
   assert(index < buffer_.size());
   return buffer_[(current_pose_index_ - index + buffer_.size()) %
                  buffer_.size()];
 }
 
-}  // namespace dvr
-}  // namespace android
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/dvr_pose_predictor.cpp b/libs/vr/libposepredictor/dvr_pose_predictor.cpp
new file mode 100644
index 0000000..7f2ecc0
--- /dev/null
+++ b/libs/vr/libposepredictor/dvr_pose_predictor.cpp
@@ -0,0 +1,70 @@
+#include <private/dvr/dvr_pose_predictor.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+template <typename Vec3Type>
+float32x4_t FromVec3(const Vec3Type& from) {
+  return {static_cast<float>(from.x()), static_cast<float>(from.y()),
+          static_cast<float>(from.z()), 0};
+}
+
+template <typename QuatType>
+float32x4_t FromQuat(const QuatType& from) {
+  return {static_cast<float>(from.x()), static_cast<float>(from.y()),
+          static_cast<float>(from.z()), static_cast<float>(from.w())};
+}
+
+}  //  namespace
+
+void AddPredictorPose(posepredictor::Predictor* predictor,
+                      const posepredictor::vec3& start_t_head,
+                      const posepredictor::quat& start_q_head,
+                      int64_t pose_timestamp, DvrPoseAsync* out) {
+  // Feed the predictor.
+  predictor->Add(
+      posepredictor::Pose{pose_timestamp, start_t_head, start_q_head});
+
+  // Fill the output.
+  out->timestamp_ns = pose_timestamp;
+
+  out->translation = FromVec3(start_t_head);
+  out->orientation = FromQuat(start_q_head);
+
+  out->right_translation = out->translation;
+  out->right_orientation = out->orientation;
+
+  const auto velocity = predictor->PredictVelocity(pose_timestamp);
+
+  out->velocity = FromVec3(velocity.linear);
+  out->angular_velocity = FromVec3(velocity.angular);
+
+  out->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
+  memset(out->pad, 0, sizeof(out->pad));
+}
+
+void PredictPose(const posepredictor::Predictor* predictor, int64_t left_ns,
+                 int64_t right_ns, DvrPoseAsync* out) {
+  const auto left_pose = predictor->Predict(left_ns);
+  const auto right_pose = predictor->Predict(right_ns);
+  const auto velocity = predictor->PredictVelocity((left_ns + right_ns) / 2);
+
+  // Fill the output.
+  out->timestamp_ns = left_ns;
+
+  out->translation = FromVec3(left_pose.position);
+  out->orientation = FromQuat(left_pose.orientation);
+
+  out->right_translation = FromVec3(right_pose.position);
+  out->right_orientation = FromQuat(right_pose.orientation);
+
+  out->velocity = FromVec3(velocity.linear);
+  out->angular_velocity = FromVec3(velocity.angular);
+
+  out->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
+  memset(out->pad, 0, sizeof(out->pad));
+}
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h b/libs/vr/libposepredictor/include/buffered_predictor.h
similarity index 60%
rename from libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h
rename to libs/vr/libposepredictor/include/buffered_predictor.h
index 89d89e1..eab0150 100644
--- a/libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h
+++ b/libs/vr/libposepredictor/include/buffered_predictor.h
@@ -1,33 +1,32 @@
-#ifndef ANDROID_DVR_BUFFERED_PREDICTOR_H_
-#define ANDROID_DVR_BUFFERED_PREDICTOR_H_
+#ifndef POSEPREDICTOR_BUFFERED_PREDICTOR_H_
+#define POSEPREDICTOR_BUFFERED_PREDICTOR_H_
 
 #include <vector>
 
-#include "pose_predictor.h"
+#include "predictor.h"
 
-namespace android {
-namespace dvr {
+namespace posepredictor {
 
 // Keeps the previous n poses around in a ring buffer.
 // The orientations are also unrolled so that a . b > 0 for two subsequent
 // quaternions a and b.
-class BufferedPredictor : public PosePredictor {
+class BufferedPredictor : public Predictor {
  public:
   BufferedPredictor(size_t buffer_size);
   ~BufferedPredictor() = default;
 
  protected:
   // Add a pose sample into the buffer.
-  void BufferSample(const Sample& sample);
+  void BufferSample(const Pose& sample);
 
   // Grab a previous sample.
   // index = 0: last sample
   // index = 1: the one before that
   // ...
-  const Sample& PrevSample(size_t index) const;
+  const Pose& PrevSample(size_t index) const;
 
   // Where we keep the last n poses.
-  std::vector<Sample> buffer_;
+  std::vector<Pose> buffer_;
 
   // Where the last valid pose is in the buffer.
   size_t current_pose_index_ = 0;
@@ -36,7 +35,6 @@
   size_t num_poses_added_ = 0;
 };
 
-}  // namespace dvr
-}  // namespace android
+}  // namespace posepredictor
 
-#endif  // ANDROID_DVR_BUFFERED_PREDICTOR_H_
+#endif  // POSEPREDICTOR_BUFFERED_PREDICTOR_H_
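The "unrolled" comment relies on the quaternion double cover: q and -q describe the same rotation, so BufferSample() stores whichever sign keeps consecutive samples on the same hemisphere. A standalone sketch of that check, mirroring the logic in buffered_predictor.cpp above (illustration only):

    #include <predictor.h>

    static bool NeedsFlip(const posepredictor::quat& prev,
                          const posepredictor::quat& next) {
      // coeffs() is Eigen's (x, y, z, w) vector; a negative dot product means the
      // two samples lie on opposite hemispheres of the unit quaternion sphere.
      return prev.coeffs().dot(next.coeffs()) < 0;
    }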
diff --git a/libs/vr/libposepredictor/include/linear_predictor.h b/libs/vr/libposepredictor/include/linear_predictor.h
new file mode 100644
index 0000000..0d17ec5
--- /dev/null
+++ b/libs/vr/libposepredictor/include/linear_predictor.h
@@ -0,0 +1,43 @@
+#ifndef POSEPREDICTOR_LINEAR_POSE_PREDICTOR_H_
+#define POSEPREDICTOR_LINEAR_POSE_PREDICTOR_H_
+
+#include "predictor.h"
+
+namespace posepredictor {
+
+// This class makes a linear prediction using the last two samples we received.
+class LinearPosePredictor : public Predictor {
+ public:
+  LinearPosePredictor() = default;
+
+  // Add a new sample.
+  void Add(const Pose& sample) override;
+
+  // Predict using the last two samples.
+  Pose Predict(int64_t time_ns) const override;
+
+  // Just copy the velocity over.
+  Velocity PredictVelocity(int64_t time_ns) const override;
+
+ private:
+  // The index of the last sample we received.
+  size_t current_index_ = 0;
+
+  // The previous two samples.
+  Pose samples_[2];
+
+  // Experimental
+  bool forward_predict_angular_speed_ = false;
+
+  // Transient variables updated when a sample is added.
+  vec3 velocity_ = vec3::Zero();
+  vec3 rotational_velocity_ = vec3::Zero();
+  vec3 rotational_axis_ = vec3::Zero();
+  real last_angular_speed_ = 0;
+  real angular_speed_ = 0;
+  real angular_accel_ = 0;
+};
+
+}  // namespace posepredictor
+
+#endif  // POSEPREDICTOR_LINEAR_POSE_PREDICTOR_H_
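Conceptually, the position part of the linear prediction is a straight-line extrapolation from the two buffered samples. The sketch below only illustrates that idea; the actual implementation lives in linear_predictor.cpp (not shown in this diff) and also handles orientation and angular speed.

    #include <predictor.h>

    static posepredictor::vec3 ExtrapolatePosition(const posepredictor::Pose& a,
                                                   const posepredictor::Pose& b,
                                                   int64_t time_ns) {
      using posepredictor::Predictor;
      // Velocity estimated from the last two samples, then projected past b.
      const posepredictor::real dt = Predictor::NsToSeconds(b.time_ns - a.time_ns);
      const posepredictor::vec3 velocity = (b.position - a.position) / dt;
      return b.position + velocity * Predictor::NsToSeconds(time_ns - b.time_ns);
    }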
diff --git a/libs/vr/libposepredictor/include/polynomial_predictor.h b/libs/vr/libposepredictor/include/polynomial_predictor.h
new file mode 100644
index 0000000..762afd3
--- /dev/null
+++ b/libs/vr/libposepredictor/include/polynomial_predictor.h
@@ -0,0 +1,168 @@
+#ifndef POSEPREDICTOR_POLYNOMIAL_POSE_PREDICTOR_H_
+#define POSEPREDICTOR_POLYNOMIAL_POSE_PREDICTOR_H_
+
+#include <vector>
+
+#include <Eigen/Dense>
+
+#include "buffered_predictor.h"
+
+namespace posepredictor {
+
+// Make a polynomial prediction of the form
+// y = coefficients_[0] + coefficients_[1] * t + coefficients_[2] * t^2 + ...
+// where t is time and y is the position and orientation.
+// We recompute the coefficients whenever we add a new sample using
+// training_window previous samples.
+template <size_t PolynomialDegree, size_t TrainingWindow>
+class PolynomialPosePredictor : public BufferedPredictor {
+ public:
+  PolynomialPosePredictor(real regularization = 1e-9)
+      : BufferedPredictor(TrainingWindow), regularization_(regularization) {
+    static_assert(PolynomialDegree + 1 >= TrainingWindow,
+                  "Underconstrained polynomial regressor");
+  }
+
+  ~PolynomialPosePredictor() = default;
+
+  // We convert pose samples into a vector for matrix arithmetic using this
+  // mapping.
+  enum Components {
+    kPositionX = 0,
+    kPositionY,
+    kPositionZ,
+    kOrientationX,
+    kOrientationY,
+    kOrientationZ,
+    kOrientationW,
+    kNumComponents
+  };
+
+  // Add a new sample.
+  void Add(const Pose& sample) override {
+    // Add the sample to the ring buffer.
+    BufferedPredictor::BufferSample(sample);
+
+    Eigen::Matrix<real, TrainingWindow, kNumComponents> values;
+
+    // Get the pose samples into matrices for fitting.
+    real t_vector[TrainingWindow];
+    for (size_t i = 0; i < TrainingWindow; ++i) {
+      const auto& prev_sample = PrevSample(i);
+
+      t_vector[i] = NsToT(prev_sample.time_ns);
+
+      // Save the values we will be fitting to at each sample time.
+      values(i, kPositionX) = prev_sample.position.x();
+      values(i, kPositionY) = prev_sample.position.y();
+      values(i, kPositionZ) = prev_sample.position.z();
+      values(i, kOrientationX) = prev_sample.orientation.x();
+      values(i, kOrientationY) = prev_sample.orientation.y();
+      values(i, kOrientationZ) = prev_sample.orientation.z();
+      values(i, kOrientationW) = prev_sample.orientation.w();
+    }
+
+    // Some transient matrices for solving for coefficient matrix.
+    Eigen::Matrix<real, PolynomialDegree + 1, PolynomialDegree + 1> M;
+    Eigen::Matrix<real, PolynomialDegree + 1, 1> d;
+    Eigen::Matrix<real, PolynomialDegree + 1, 1> p;
+
+    // Create a polynomial fit for each component.
+    for (size_t component = 0; component < kNumComponents; ++component) {
+      // A = [ 1 t t^2 ... ]'
+      // x = [ coefficients[0] coefficients[1] .... ]'
+      // b = [ position.x ]'
+      // We would like to solve A' x + regularization * I = b'
+      // given the samples we have in our training window.
+      //
+      // The loop below will compute:
+      // M = A' * A
+      // d = A' * b
+      // so we can solve M * coefficients + regularization * I = b
+
+      M.setIdentity();
+      d.setZero();
+      p[0] = 1;
+
+      // M = regularization * I
+      M = M * regularization_;
+
+      // Accumulate the poses in the training window.
+      for (size_t i = 0; i < TrainingWindow; ++i) {
+        // Compute the polynomial at this sample.
+        for (size_t j = 1; j <= PolynomialDegree; ++j) {
+          p[j] = p[j - 1] * t_vector[i];
+        }
+
+        // Accumulate the left and right hand sides.
+        M = M + p * p.transpose();
+        d = d + p * values(i, component);
+      }
+
+      // M is symmetric, positive semi-definite.
+      // Note: This is not the most accurate solver out there but is fast.
+      coefficients_.row(component) = Eigen::LLT<Eigen::MatrixXd>(M).solve(d);
+    }
+  }
+
+  // Predict using the polynomial coefficients.
+  Pose Predict(int64_t time_ns) const override {
+    // Predict the left side.
+    const auto components = SamplePolynomial(time_ns);
+
+    return {time_ns,
+            vec3(components[kPositionX], components[kPositionY],
+                 components[kPositionZ]),
+            quat(components[kOrientationW], components[kOrientationX],
+                 components[kOrientationY], components[kOrientationZ])
+                .normalized()};
+  }
+
+ private:
+  // Evaluate the polynomial at a particular time.
+  Eigen::Matrix<real, kNumComponents, 1> SamplePolynomial(
+      int64_t time_ns) const {
+    const auto t = NsToT(time_ns);
+    Eigen::Matrix<real, PolynomialDegree + 1, 1> polynomial;
+    real current_polynomial = t;
+
+    // Compute polynomial = [ 1 t t^2 ... ]
+    polynomial[0] = 1;
+    for (size_t degree = 1; degree <= PolynomialDegree;
+         ++degree, current_polynomial *= t) {
+      polynomial[degree] = polynomial[degree - 1] * t;
+    }
+
+    // The coefficients_ = [ numComponents x (polynomial degree + 1) ].
+    return coefficients_ * polynomial;
+  }
+
+  // Convert a time in nanoseconds to t.
+  // We could use the seconds as t, but this would make it more difficult
+  // to tweak the regularization amount. So we subtract the last sample time so
+  // the scale of the regularization constant doesn't change as a function of
+  // time.
+  real NsToT(int64_t time_ns) const {
+    return NsToSeconds(time_ns - buffer_[current_pose_index_].time_ns);
+  }
+
+  // The ridge regularization constant.
+  real regularization_;
+
+  // This is where we store the polynomial coefficients.
+  Eigen::Matrix<real, kNumComponents, PolynomialDegree + 1> coefficients_;
+};
+
+// Some common polynomial types.
+extern template class PolynomialPosePredictor<1, 2>;
+extern template class PolynomialPosePredictor<2, 3>;
+extern template class PolynomialPosePredictor<3, 4>;
+extern template class PolynomialPosePredictor<4, 5>;
+
+using QuadricPosePredictor = PolynomialPosePredictor<2, 3>;
+using CubicPosePredictor = PolynomialPosePredictor<3, 4>;
+using QuarticPosePredictor = PolynomialPosePredictor<4, 5>;
+
+}  // namespace posepredictor
+
+#endif  // POSEPREDICTOR_POLYNOMIAL_POSE_PREDICTOR_H_
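A usage sketch for the preset aliases above (the sample values are made up; this is not test code from the change):

    #include <polynomial_predictor.h>

    static void ExampleCubicFit() {
      // CubicPosePredictor keeps a 4-sample window and fits a degree-3 polynomial.
      posepredictor::CubicPosePredictor predictor;
      for (int i = 0; i < 4; ++i) {
        posepredictor::Pose sample;
        sample.time_ns = i * 10000000;        // samples 10 ms apart (hypothetical)
        sample.position = posepredictor::vec3(0.01 * i, 0.0, 0.0);
        predictor.Add(sample);                // refits the coefficients
      }
      // Extrapolate 5 ms past the last sample.
      const posepredictor::Pose predicted = predictor.Predict(35000000);
      (void)predicted;
    }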
diff --git a/libs/vr/libposepredictor/include/predictor.h b/libs/vr/libposepredictor/include/predictor.h
new file mode 100644
index 0000000..78db272
--- /dev/null
+++ b/libs/vr/libposepredictor/include/predictor.h
@@ -0,0 +1,73 @@
+#ifndef POSEPREDICTOR_POSE_PREDICTOR_H_
+#define POSEPREDICTOR_POSE_PREDICTOR_H_
+
+#include <Eigen/Core>
+#include <Eigen/Geometry>
+
+// This is the only file you need to include for pose prediction.
+
+namespace posepredictor {
+
+// The precision for the predictor.
+// TODO(okana): double precision is probably not necessary.
+typedef double real;
+
+using vec3 = Eigen::Matrix<real, 3, 1>;
+using quat = Eigen::Quaternion<real>;
+
+// Encapsulates a pose sample.
+struct Pose {
+  int64_t time_ns = 0;
+  vec3 position = vec3::Zero();
+  quat orientation = quat::Identity();
+};
+
+// Encapsulates the derivative at a time.
+struct Velocity {
+  vec3 linear = vec3::Zero();
+  vec3 angular = vec3::Zero();
+};
+
+// The preset types we support.
+enum class PredictorType { Linear, Quadric, Cubic };
+
+// This is an abstract base class for predicting a 6dof pose given
+// a set of samples.
+class Predictor {
+ public:
+  Predictor() = default;
+  virtual ~Predictor() = default;
+
+  // The nanoseconds to use for finite differencing.
+  static constexpr int64_t kFiniteDifferenceNs = 100;
+
+  // Instantiate a new pose predictor for a type.
+  static std::unique_ptr<Predictor> Create(PredictorType type);
+
+  // Compute the angular velocity from orientation start_orientation to
+  // end_orientation in delta_time.
+  static vec3 AngularVelocity(const quat& start_orientation,
+                              const quat& end_orientation, real delta_time);
+
+  // Add a pose sample coming from the sensors.
+  virtual void Add(const Pose& sample) = 0;
+
+  // Make a pose prediction at a specific time.
+  virtual Pose Predict(int64_t time_ns) const = 0;
+
+  // Evaluate velocity at a particular time.
+  // The default implementation uses finite differencing.
+  virtual Velocity PredictVelocity(int64_t time_ns) const;
+
+  // Helpers
+  static real NsToSeconds(int64_t time_ns) {
+    return static_cast<real>(time_ns / 1e9);
+  }
+  static int64_t SecondsToNs(real seconds) {
+    return static_cast<int64_t>(seconds * 1e9);
+  }
+};
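+
+// Example (illustrative only; times and values are made up):
+//
+//   auto predictor = Predictor::Create(PredictorType::Cubic);
+//   predictor->Add({/*time_ns=*/1000000, vec3(0, 0, 1), quat::Identity()});
+//   Pose pose = predictor->Predict(/*time_ns=*/2000000);
+//   Velocity velocity = predictor->PredictVelocity(/*time_ns=*/2000000);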
+
+}  // namespace posepredictor
+
+#endif  // POSEPREDICTOR_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/dvr_pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/dvr_pose_predictor.h
new file mode 100644
index 0000000..bd2dcbc
--- /dev/null
+++ b/libs/vr/libposepredictor/include/private/dvr/dvr_pose_predictor.h
@@ -0,0 +1,25 @@
+#ifndef ANDROID_DVR_POSE_PREDICTOR_H_
+#define ANDROID_DVR_POSE_PREDICTOR_H_
+
+#include <dvr/pose_client.h>
+#include <predictor.h>
+
+// Some shim functions for connecting dvr to pose predictor.
+
+namespace android {
+namespace dvr {
+
+// Feed a pose to the predictor.
+void AddPredictorPose(posepredictor::Predictor* predictor,
+                      const posepredictor::vec3& start_t_head,
+                      const posepredictor::quat& start_q_head,
+                      int64_t pose_timestamp, DvrPoseAsync* out);
+
+// Make a prediction for left and right eyes.
+void PredictPose(const posepredictor::Predictor* predictor, int64_t left_ns,
+                 int64_t right_ns, DvrPoseAsync* out);
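+
+// Example (illustrative sketch; the position, orientation, and timestamp
+// variables are assumed to come from the pose service):
+//
+//   auto predictor = posepredictor::Predictor::Create(
+//       posepredictor::PredictorType::Quadric);
+//   DvrPoseAsync out;
+//   AddPredictorPose(predictor.get(), position, orientation, timestamp_ns,
+//                    &out);
+//   PredictPose(predictor.get(), left_eye_time_ns, right_eye_time_ns, &out);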
+
+}  // namespace dvr
+}  // namespace android
+
+#endif  // ANDROID_DVR_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/linear_pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/linear_pose_predictor.h
deleted file mode 100644
index 1efe938..0000000
--- a/libs/vr/libposepredictor/include/private/dvr/linear_pose_predictor.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef ANDROID_DVR_POSE_PREDICTOR_H_
-#define ANDROID_DVR_POSE_PREDICTOR_H_
-
-#include <private/dvr/pose_predictor.h>
-
-namespace android {
-namespace dvr {
-
-// This class makes a linear prediction using the last two samples we received.
-class LinearPosePredictor : public PosePredictor {
- public:
-  LinearPosePredictor() = default;
-
-  // Add a new sample.
-  void Add(const Sample& sample, DvrPoseAsync* out_pose) override;
-
-  // Predict using the last two samples.
-  void Predict(int64_t left_time_ns, int64_t right_time_ns,
-               DvrPoseAsync* out_pose) const override;
-
- private:
-  // The index of the last sample we received.
-  size_t current_index_ = 0;
-
-  // The previous two samples.
-  Sample samples_[2];
-
-  // Experimental
-  bool forward_predict_angular_speed_ = false;
-
-  // Transient variables updated when a sample is added.
-  vec3d velocity_ = vec3d::Zero();
-  vec3d rotational_velocity_ = vec3d::Zero();
-  vec3d rotational_axis_ = vec3d::Zero();
-  double last_angular_speed_ = 0;
-  double angular_speed_ = 0;
-  double angular_accel_ = 0;
-};
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h
deleted file mode 100644
index 7abb486..0000000
--- a/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h
+++ /dev/null
@@ -1,219 +0,0 @@
-#ifndef ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
-#define ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
-
-#include <vector>
-
-#include <Eigen/Dense>
-
-#include "buffered_predictor.h"
-
-namespace android {
-namespace dvr {
-
-// Make a polynomial prediction of the form
-// y = coefficients_[0] + coefficients_[1] * t + coefficients_[2] * t^2 + ...
-// where t is time and y is the position and orientation.
-// We recompute the coefficients whenever we add a new sample using
-// training_window previous samples.
-template <size_t PolynomialDegree, size_t TrainingWindow>
-class PolynomialPosePredictor : public BufferedPredictor {
- public:
-  PolynomialPosePredictor(double regularization = 1e-9)
-      : BufferedPredictor(TrainingWindow), regularization_(regularization) {
-    static_assert(PolynomialDegree + 1 >= TrainingWindow,
-                  "Underconstrained polynomial regressor");
-  }
-
-  ~PolynomialPosePredictor() = default;
-
-  // We convert pose samples into a vector for matrix arithmetic using this
-  // mapping.
-  enum Components {
-    kPositionX = 0,
-    kPositionY,
-    kPositionZ,
-    kOrientationX,
-    kOrientationY,
-    kOrientationZ,
-    kOrientationW,
-    kNumComponents
-  };
-
-  // Add a new sample.
-  void Add(const Sample& sample, DvrPoseAsync* out_pose) override {
-    // Add the sample to the ring buffer.
-    BufferedPredictor::BufferSample(sample);
-
-    Eigen::Matrix<double, TrainingWindow, kNumComponents> values;
-
-    // Get the pose samples into matrices for fitting.
-    double t_vector[TrainingWindow];
-    for (size_t i = 0; i < TrainingWindow; ++i) {
-      const auto& prev_sample = PrevSample(i);
-
-      t_vector[i] = NsToT(prev_sample.time_ns);
-
-      // Save the values we will be fitting to at each sample time.
-      values(i, kPositionX) = prev_sample.position.x();
-      values(i, kPositionY) = prev_sample.position.y();
-      values(i, kPositionZ) = prev_sample.position.z();
-      values(i, kOrientationX) = prev_sample.orientation.x();
-      values(i, kOrientationY) = prev_sample.orientation.y();
-      values(i, kOrientationZ) = prev_sample.orientation.z();
-      values(i, kOrientationW) = prev_sample.orientation.w();
-    }
-
-    // Some transient matrices for solving for coefficient matrix.
-    Eigen::Matrix<double, PolynomialDegree + 1, PolynomialDegree + 1> M;
-    Eigen::Vector<double, PolynomialDegree + 1> d;
-    Eigen::Vector<double, PolynomialDegree + 1> p;
-
-    // Create a polynomial fit for each component.
-    for (size_t component = 0; component < kNumComponents; ++component) {
-      // A = [ 1 t t^2 ... ]'
-      // x = [ coefficients[0] coefficients[1] .... ]'
-      // b = [ position.x ]'
-      // We would like to solve A' x + regularization * I = b'
-      // given the samples we have in our training window.
-      //
-      // The loop below will compute:
-      // M = A' * A
-      // d = A' * b
-      // so we can solve M * coefficients + regularization * I = b
-
-      M.setIdentity();
-      d.setZero();
-      p[0] = 1;
-
-      // M = regularization * I
-      M = M * regularization_;
-
-      // Accumulate the poses in the training window.
-      for (size_t i = 0; i < TrainingWindow; ++i) {
-        // Compute the polynomial at this sample.
-        for (size_t j = 1; j <= PolynomialDegree; ++j) {
-          p[j] = p[j - 1] * t_vector[i];
-        }
-
-        // Accumulate the left and right hand sides.
-        M = M + p * p.transpose();
-        d = d + p * values(i, component);
-      }
-
-      // M is symmetric, positive semi-definite.
-      // Note: This is not the most accurate solver out there but is fast.
-      coefficients_.row(component) = Eigen::LLT<Eigen::MatrixXd>(M).solve(d);
-    }
-
-    // Fill out the out_pose at this sample.
-    Predict(sample.time_ns, sample.time_ns, out_pose);
-  }
-
-  // Predict using the polynomial coefficients.
-  void Predict(int64_t left_time_ns, int64_t right_time_ns,
-               DvrPoseAsync* out_pose) const override {
-    // Predict the left side.
-    const auto left = SamplePolynomial(left_time_ns);
-    out_pose->translation = {static_cast<float>(left[kPositionX]),
-                             static_cast<float>(left[kPositionY]),
-                             static_cast<float>(left[kPositionZ])};
-    out_pose->orientation = normalize(left[kOrientationX], left[kOrientationY],
-                                      left[kOrientationZ], left[kOrientationW]);
-
-    // Predict the right side.
-    const auto right = SamplePolynomial(right_time_ns);
-    out_pose->right_translation = {static_cast<float>(right[kPositionX]),
-                                   static_cast<float>(right[kPositionY]),
-                                   static_cast<float>(right[kPositionZ])};
-    out_pose->right_orientation =
-        normalize(right[kOrientationX], right[kOrientationY],
-                  right[kOrientationZ], right[kOrientationW]);
-
-    // Finite differencing to estimate the velocities.
-    const auto a = SamplePolynomial(
-        (left_time_ns + right_time_ns - kFiniteDifferenceNs) / 2);
-    const auto b = SamplePolynomial(
-        (left_time_ns + right_time_ns + kFiniteDifferenceNs) / 2);
-
-    out_pose->velocity = {static_cast<float>((b[kPositionX] - a[kPositionX]) /
-                                             NsToSeconds(kFiniteDifferenceNs)),
-                          static_cast<float>((b[kPositionY] - a[kPositionY]) /
-                                             NsToSeconds(kFiniteDifferenceNs)),
-                          static_cast<float>((b[kPositionZ] - a[kPositionZ]) /
-                                             NsToSeconds(kFiniteDifferenceNs)),
-                          0.0f};
-
-    // Get the predicted orientations into quaternions, which are probably not
-    // quite unit.
-    const quatd a_orientation(a[kOrientationW], a[kOrientationX],
-                              a[kOrientationY], a[kOrientationZ]);
-    const quatd b_orientation(b[kOrientationW], b[kOrientationX],
-                              b[kOrientationY], b[kOrientationZ]);
-    const auto angular_velocity =
-        AngularVelocity(a_orientation.normalized(), b_orientation.normalized(),
-                        NsToSeconds(kFiniteDifferenceNs));
-
-    out_pose->angular_velocity = {static_cast<float>(angular_velocity[0]),
-                                  static_cast<float>(angular_velocity[1]),
-                                  static_cast<float>(angular_velocity[2]),
-                                  0.0f};
-    out_pose->timestamp_ns = left_time_ns;
-    out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
-    memset(out_pose->pad, 0, sizeof(out_pose->pad));
-  }
-
- private:
-  // Take a quaternion and return a normalized version in a float32x4_t.
-  static float32x4_t normalize(double x, double y, double z, double w) {
-    const auto l = std::sqrt(x * x + y * y + z * z + w * w);
-    return {static_cast<float>(x / l), static_cast<float>(y / l),
-            static_cast<float>(z / l), static_cast<float>(w / l)};
-  }
-
-  // Evaluate the polynomial at a particular time.
-  Eigen::Vector<double, kNumComponents> SamplePolynomial(
-      int64_t time_ns) const {
-    const auto t = NsToT(time_ns);
-    Eigen::Vector<double, PolynomialDegree + 1> polynomial;
-    double current_polynomial = t;
-
-    // Compute polynomial = [ 1 t t^2 ... ]
-    polynomial[0] = 1;
-    for (size_t degree = 1; degree <= PolynomialDegree;
-         ++degree, current_polynomial *= t) {
-      polynomial[degree] = polynomial[degree - 1] * t;
-    }
-
-    // The coefficients_ = [ numComponents x (polynomial degree + 1) ].
-    return coefficients_ * polynomial;
-  }
-
-  // Convert a time in nanoseconds to t.
-  // We could use the seconds as t but this would create make it more difficult
-  // to tweak the regularization amount. So we subtract the last sample time so
-  // the scale of the regularization constant doesn't change as a function of
-  // time.
-  double NsToT(int64_t time_ns) const {
-    return NsToSeconds(time_ns - buffer_[current_pose_index_].time_ns);
-  }
-
-  // The ridge regularization constant.
-  double regularization_;
-
-  // This is where we store the polynomial coefficients.
-  Eigen::Matrix<double, kNumComponents, PolynomialDegree + 1> coefficients_;
-};
-
-// Some common polynomial types.
-extern template class PolynomialPosePredictor<1, 2>;
-extern template class PolynomialPosePredictor<2, 3>;
-extern template class PolynomialPosePredictor<3, 4>;
-extern template class PolynomialPosePredictor<4, 5>;
-
-using QuadricPosePredictor = PolynomialPosePredictor<2, 3>;
-using CubicPosePredictor = PolynomialPosePredictor<3, 4>;
-using QuarticPosePredictor = PolynomialPosePredictor<4, 5>;
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h
deleted file mode 100644
index d774500..0000000
--- a/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef ANDROID_DVR_LINEAR_POSE_PREDICTOR_H_
-#define ANDROID_DVR_LINEAR_POSE_PREDICTOR_H_
-
-#include <private/dvr/pose_client_internal.h>
-#include <private/dvr/types.h>
-
-namespace android {
-namespace dvr {
-
-// This is an abstract base class for prediction 6dof pose given
-// a set of samples.
-//
-// TODO(okana): Create a framework for testing different subclasses for
-// performance and accuracy.
-class PosePredictor {
- public:
-  PosePredictor() = default;
-  virtual ~PosePredictor() = default;
-
-  // The nanoseconds to use for finite differencing.
-  static constexpr int64_t kFiniteDifferenceNs = 100;
-
-  // Encapsulates a pose sample.
-  struct Sample {
-    vec3d position = vec3d::Zero();
-    quatd orientation = quatd::Identity();
-    int64_t time_ns = 0;
-  };
-
-  // Compute the angular velocity from orientation start_orientation to
-  // end_orientation in delta_time.
-  static vec3d AngularVelocity(const quatd& start_orientation,
-                               const quatd& end_orientation, double delta_time);
-
-  // Initialize the out pose from a sample.
-  static void InitializeFromSample(const Sample& sample, DvrPoseAsync* out_pose,
-                                   const vec3d& velocity,
-                                   const vec3d& angular_velocity);
-
-  // Add a pose sample coming from the sensors.
-  // Returns this sample as a dvr pose.
-  //
-  // We will use the returned pose if prediction is not enabled.
-  virtual void Add(const Sample& sample, DvrPoseAsync* out_pose) = 0;
-
-  // Make a pose prediction for the left and right eyes at specific times.
-  virtual void Predict(int64_t left_time_ns, int64_t right_time_ns,
-                       DvrPoseAsync* out_pose) const = 0;
-
-  // Helpers
-  static double NsToSeconds(int64_t time_ns) { return time_ns / 1e9; }
-  static int64_t SecondsToNs(double seconds) {
-    return static_cast<int64_t>(seconds * 1e9);
-  }
-};
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_LINEAR_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/linear_pose_predictor.cpp b/libs/vr/libposepredictor/linear_pose_predictor.cpp
deleted file mode 100644
index 11db735..0000000
--- a/libs/vr/libposepredictor/linear_pose_predictor.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-#include <private/dvr/linear_pose_predictor.h>
-
-namespace android {
-namespace dvr {
-
-using AngleAxisd = Eigen::AngleAxis<double>;
-
-void LinearPosePredictor::Add(const Sample& sample, DvrPoseAsync* out_pose) {
-  // If we are receiving a new sample, move the index to the next item.
-  // If the time stamp is the same as the last frame, we will just overwrite
-  // it with the new data.
-  if (sample.time_ns != samples_[current_index_].time_ns) {
-    current_index_ ^= 1;
-  }
-
-  // Save the sample.
-  samples_[current_index_] = sample;
-
-  // The previous sample we received.
-  const auto& previous_sample = samples_[current_index_ ^ 1];
-
-  // Ready to compute velocities.
-  const auto pose_delta_time =
-      NsToSeconds(sample.time_ns - previous_sample.time_ns);
-
-  if (pose_delta_time > 0.0) {
-    velocity_ = (sample.position - previous_sample.position) / pose_delta_time;
-    rotational_velocity_ = PosePredictor::AngularVelocity(
-        previous_sample.orientation, sample.orientation, pose_delta_time);
-  } else {
-    velocity_ = vec3d::Zero();
-    rotational_velocity_ = vec3d::Zero();
-  }
-
-  // Temporary experiment with acceleration estimate.
-  angular_speed_ = rotational_velocity_.norm();
-  angular_accel_ = 0.0;
-  if (forward_predict_angular_speed_) {
-    angular_accel_ =
-        pose_delta_time > 0.0
-            ? (angular_speed_ - last_angular_speed_) / pose_delta_time
-            : 0.0;
-  }
-  last_angular_speed_ = angular_speed_;
-
-  rotational_axis_ = vec3d(0.0, 1.0, 0.0);
-  if (angular_speed_ > 0.0) {
-    rotational_axis_ = rotational_velocity_ / angular_speed_;
-  }
-
-  InitializeFromSample(sample, out_pose, velocity_, rotational_velocity_);
-}
-
-void LinearPosePredictor::Predict(int64_t left_time_ns, int64_t right_time_ns,
-                                  DvrPoseAsync* out_pose) const {
-  const auto& sample = samples_[current_index_];
-
-  double dt = NsToSeconds(left_time_ns - sample.time_ns);
-  double r_dt = NsToSeconds(right_time_ns - sample.time_ns);
-
-  // Temporary forward prediction code.
-  auto start_t_head_future = sample.position + velocity_ * dt;
-  auto r_start_t_head_future = sample.position + velocity_ * r_dt;
-  double angle = angular_speed_ * dt;
-  double r_angle = angular_speed_ * r_dt;
-  if (__builtin_expect(forward_predict_angular_speed_, 0)) {
-    angle += 0.5 * angular_accel_ * dt * dt;
-    r_angle += 0.5 * angular_accel_ * r_dt * r_dt;
-  }
-  auto start_q_head_future =
-      sample.orientation * quatd(AngleAxisd(angle, rotational_axis_));
-  auto r_start_q_head_future =
-      sample.orientation * quatd(AngleAxisd(r_angle, rotational_axis_));
-
-  out_pose->orientation = {static_cast<float>(start_q_head_future.x()),
-                           static_cast<float>(start_q_head_future.y()),
-                           static_cast<float>(start_q_head_future.z()),
-                           static_cast<float>(start_q_head_future.w())};
-
-  out_pose->translation = {static_cast<float>(start_t_head_future.x()),
-                           static_cast<float>(start_t_head_future.y()),
-                           static_cast<float>(start_t_head_future.z()), 0.0f};
-
-  out_pose->right_orientation = {static_cast<float>(r_start_q_head_future.x()),
-                                 static_cast<float>(r_start_q_head_future.y()),
-                                 static_cast<float>(r_start_q_head_future.z()),
-                                 static_cast<float>(r_start_q_head_future.w())};
-
-  out_pose->right_translation = {static_cast<float>(r_start_t_head_future.x()),
-                                 static_cast<float>(r_start_t_head_future.y()),
-                                 static_cast<float>(r_start_t_head_future.z()),
-                                 0.0f};
-
-  out_pose->angular_velocity = {static_cast<float>(rotational_velocity_.x()),
-                                static_cast<float>(rotational_velocity_.y()),
-                                static_cast<float>(rotational_velocity_.z()),
-                                0.0f};
-
-  out_pose->velocity = {static_cast<float>(velocity_.x()),
-                        static_cast<float>(velocity_.y()),
-                        static_cast<float>(velocity_.z()), 0.0f};
-
-  out_pose->timestamp_ns = left_time_ns;
-  out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
-  memset(out_pose->pad, 0, sizeof(out_pose->pad));
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp b/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp
deleted file mode 100644
index 1f38041a..0000000
--- a/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-#include <gtest/gtest.h>
-
-#include <private/dvr/linear_pose_predictor.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-// For comparing expected and actual.
-constexpr double kAbsErrorTolerance = 1e-5;
-
-// The default rotation axis we will be using.
-const vec3d kRotationAxis = vec3d(1, 4, 3).normalized();
-
-// Linearly interpolate between a and b.
-vec3d lerp(const vec3d& a, const vec3d& b, double t) { return (b - a) * t + a; }
-
-// Linearly interpolate between two angles and return the resulting rotation as
-// a quaternion (around the kRotationAxis).
-quatd qlerp(double angle1, double angle2, double t) {
-  return quatd(
-      Eigen::AngleAxis<double>((angle2 - angle1) * t + angle1, kRotationAxis));
-}
-
-// Compare two positions.
-void TestPosition(const vec3d& expected, const float32x4_t& actual) {
-  for (int i = 0; i < 3; ++i) {
-    EXPECT_NEAR(expected[i], static_cast<double>(actual[i]),
-                kAbsErrorTolerance);
-  }
-}
-
-// Compare two orientations.
-void TestOrientation(const quatd& expected, const float32x4_t& actual) {
-  // abs(expected.dot(actual)) > 1-eps
-  EXPECT_GE(std::abs(vec4d(actual[0], actual[1], actual[2], actual[3])
-                         .dot(expected.coeffs())),
-            0.99);
-}
-}
-
-// Test the extrapolation from two samples.
-TEST(LinearPosePredictorTest, Extrapolation) {
-  LinearPosePredictor predictor;
-
-  // We wil extrapolate linearly from [position|orientation] 1 -> 2.
-  const vec3d position1(0, 0, 0);
-  const vec3d position2(1, 2, 3);
-  const double angle1 = M_PI * 0.3;
-  const double angle2 = M_PI * 0.5;
-  const quatd orientation1(Eigen::AngleAxis<double>(angle1, kRotationAxis));
-  const quatd orientation2(Eigen::AngleAxis<double>(angle2, kRotationAxis));
-  const int64_t t1_ns = 0;           //< First sample time stamp
-  const int64_t t2_ns = 10;          //< The second sample time stamp
-  const int64_t eval_left_ns = 23;   //< The eval time for left
-  const int64_t eval_right_ns = 31;  //< The eval time for right
-  DvrPoseAsync start_pose, end_pose, extrapolated_pose;
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = position1, .orientation = orientation1, .time_ns = t1_ns},
-      &start_pose);
-
-  // The start pose is passthough.
-  TestPosition(position1, start_pose.translation);
-  TestPosition(position1, start_pose.right_translation);
-  TestOrientation(orientation1, start_pose.orientation);
-  TestOrientation(orientation1, start_pose.right_orientation);
-  EXPECT_EQ(t1_ns, start_pose.timestamp_ns);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = position2, .orientation = orientation2, .time_ns = t2_ns},
-      &end_pose);
-
-  TestPosition(position2, end_pose.translation);
-  TestPosition(position2, end_pose.right_translation);
-  TestOrientation(orientation2, end_pose.orientation);
-  TestOrientation(orientation2, end_pose.right_orientation);
-  EXPECT_EQ(t2_ns, end_pose.timestamp_ns);
-
-  // Extrapolate from t1 - t2 to eval_[left/right].
-  predictor.Predict(eval_left_ns, eval_right_ns, &extrapolated_pose);
-
-  // The interpolation factors for left and right.
-  const auto left_t =
-      (eval_left_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-  EXPECT_EQ(2.3, left_t);
-
-  const auto right_t =
-      (eval_right_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-  EXPECT_EQ(3.1, right_t);
-
-  TestPosition(lerp(position1, position2, left_t),
-               extrapolated_pose.translation);
-  TestPosition(lerp(position1, position2, right_t),
-               extrapolated_pose.right_translation);
-  TestOrientation(qlerp(angle1, angle2, left_t), extrapolated_pose.orientation);
-  TestOrientation(qlerp(angle1, angle2, right_t),
-                  extrapolated_pose.right_orientation);
-}
-
-// Test three samples, where the last two samples have the same timestamp.
-TEST(LinearPosePredictorTest, DuplicateSamples) {
-  LinearPosePredictor predictor;
-
-  const vec3d position1(0, 0, 0);
-  const vec3d position2(1, 2, 3);
-  const vec3d position3(2, 2, 3);
-  const double angle1 = M_PI * 0.3;
-  const double angle2 = M_PI * 0.5;
-  const double angle3 = M_PI * 0.65;
-  const quatd orientation1(Eigen::AngleAxis<double>(angle1, kRotationAxis));
-  const quatd orientation2(Eigen::AngleAxis<double>(angle2, kRotationAxis));
-  const quatd orientation3(Eigen::AngleAxis<double>(angle3, kRotationAxis));
-  const int64_t t1_ns = 0;
-  const int64_t t2_ns = 10;
-  const int64_t eval_left_ns = 27;
-  const int64_t eval_right_ns = 31;
-  DvrPoseAsync start_pose, end_pose, extrapolated_pose;
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = position1, .orientation = orientation1, .time_ns = t1_ns},
-      &start_pose);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = position2, .orientation = orientation2, .time_ns = t2_ns},
-      &end_pose);
-
-  {
-    // Extrapolate from t1 - t2 to eval_[left/right].
-    predictor.Predict(eval_left_ns, eval_right_ns, &extrapolated_pose);
-
-    // The interpolation factors for left and right.
-    const auto left_t =
-        (eval_left_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-    const auto right_t =
-        (eval_right_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-
-    // Test the result.
-    TestPosition(lerp(position1, position2, left_t),
-                 extrapolated_pose.translation);
-    TestPosition(lerp(position1, position2, right_t),
-                 extrapolated_pose.right_translation);
-    TestOrientation(qlerp(angle1, angle2, left_t),
-                    extrapolated_pose.orientation);
-    TestOrientation(qlerp(angle1, angle2, right_t),
-                    extrapolated_pose.right_orientation);
-  }
-
-  // Sending a duplicate sample here.
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = position3, .orientation = orientation3, .time_ns = t2_ns},
-      &end_pose);
-
-  {
-    // Extrapolate from t1 - t2 to eval_[left/right].
-    predictor.Predict(eval_left_ns, eval_right_ns, &extrapolated_pose);
-
-    // The interpolation factors for left and right.
-    const auto left_t =
-        (eval_left_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-    const auto right_t =
-        (eval_right_ns - t1_ns) / static_cast<double>(t2_ns - t1_ns);
-
-    // Test the result.
-    TestPosition(lerp(position1, position3, left_t),
-                 extrapolated_pose.translation);
-    TestPosition(lerp(position1, position3, right_t),
-                 extrapolated_pose.right_translation);
-    TestOrientation(qlerp(angle1, angle3, left_t),
-                    extrapolated_pose.orientation);
-    TestOrientation(qlerp(angle1, angle3, right_t),
-                    extrapolated_pose.right_orientation);
-  }
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libposepredictor/linear_predictor.cpp b/libs/vr/libposepredictor/linear_predictor.cpp
new file mode 100644
index 0000000..6f924dc
--- /dev/null
+++ b/libs/vr/libposepredictor/linear_predictor.cpp
@@ -0,0 +1,70 @@
+#include <linear_predictor.h>
+
+namespace posepredictor {
+
+using AngleAxis = Eigen::AngleAxis<real>;
+
+void LinearPosePredictor::Add(const Pose& sample) {
+  // If we are receiving a new sample, move the index to the next item.
+  // If the time stamp is the same as the last frame, we will just overwrite
+  // it with the new data.
+  if (sample.time_ns != samples_[current_index_].time_ns) {
+    current_index_ ^= 1;
+  }
+
+  // Save the sample.
+  samples_[current_index_] = sample;
+
+  // The previous sample we received.
+  const auto& previous_sample = samples_[current_index_ ^ 1];
+
+  // Ready to compute velocities.
+  const auto pose_delta_time =
+      NsToSeconds(sample.time_ns - previous_sample.time_ns);
+
+  if (pose_delta_time > 0.0) {
+    velocity_ = (sample.position - previous_sample.position) / pose_delta_time;
+    rotational_velocity_ = Predictor::AngularVelocity(
+        previous_sample.orientation, sample.orientation, pose_delta_time);
+  } else {
+    velocity_ = vec3::Zero();
+    rotational_velocity_ = vec3::Zero();
+  }
+
+  // Temporary experiment with acceleration estimate.
+  angular_speed_ = rotational_velocity_.norm();
+  angular_accel_ = 0.0;
+  if (forward_predict_angular_speed_) {
+    angular_accel_ =
+        pose_delta_time > 0.0
+            ? (angular_speed_ - last_angular_speed_) / pose_delta_time
+            : 0.0;
+  }
+  last_angular_speed_ = angular_speed_;
+
+  rotational_axis_ = vec3(0.0, 1.0, 0.0);
+  if (angular_speed_ > 0.0) {
+    rotational_axis_ = rotational_velocity_ / angular_speed_;
+  }
+}
+
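+// The prediction below uses a constant-velocity model:
+//   position(t)    = sample.position + velocity_ * dt
+//   orientation(t) = sample.orientation * AngleAxis(angular_speed_ * dt, axis)
+// with an optional (experimental) angular-acceleration term added to the angle.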
+Pose LinearPosePredictor::Predict(int64_t time_ns) const {
+  const auto& sample = samples_[current_index_];
+
+  const auto dt = NsToSeconds(time_ns - sample.time_ns);
+
+  // Temporary forward prediction code.
+  auto angle = angular_speed_ * dt;
+  if (__builtin_expect(forward_predict_angular_speed_, 0)) {
+    angle += 0.5 * angular_accel_ * dt * dt;
+  }
+
+  return {time_ns, sample.position + velocity_ * dt,
+          sample.orientation * quat(AngleAxis(angle, rotational_axis_))};
+}
+
+Velocity LinearPosePredictor::PredictVelocity(int64_t /* time_ns */) const {
+  return {velocity_, rotational_velocity_};
+}
+
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/linear_predictor_tests.cpp b/libs/vr/libposepredictor/linear_predictor_tests.cpp
new file mode 100644
index 0000000..d94aa2d
--- /dev/null
+++ b/libs/vr/libposepredictor/linear_predictor_tests.cpp
@@ -0,0 +1,170 @@
+#include <gtest/gtest.h>
+
+#include <linear_predictor.h>
+
+namespace posepredictor {
+
+namespace {
+
+// For comparing expected and actual.
+constexpr real kAbsErrorTolerance = 1e-5;
+
+// The default rotation axis we will be using.
+const vec3 kRotationAxis = vec3(1, 4, 3).normalized();
+
+// Linearly interpolate between a and b.
+vec3 lerp(const vec3& a, const vec3& b, real t) { return (b - a) * t + a; }
+
+// Linearly interpolate between two angles and return the resulting rotation as
+// a quaternion (around the kRotationAxis).
+quat qlerp(real angle1, real angle2, real t) {
+  return quat(
+      Eigen::AngleAxis<real>((angle2 - angle1) * t + angle1, kRotationAxis));
+}
+
+// Compare two positions.
+void TestPosition(const vec3& expected, const vec3& actual) {
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(expected[i], actual[i], kAbsErrorTolerance);
+  }
+}
+
+// Compare two orientations.
+void TestOrientation(const quat& expected, const quat& actual) {
+  // abs(expected.dot(actual)) > 1-eps
+  EXPECT_GE(std::abs(actual.coeffs().dot(expected.coeffs())), 0.99);
+}
+}  // namespace
+
+// Test the extrapolation from two samples.
+TEST(LinearPosePredictorTest, Extrapolation) {
+  LinearPosePredictor predictor;
+
+  // We will extrapolate linearly from [position|orientation] 1 -> 2.
+  const vec3 position1(0, 0, 0);
+  const vec3 position2(1, 2, 3);
+  const real angle1 = M_PI * 0.3;
+  const real angle2 = M_PI * 0.5;
+  const quat orientation1(Eigen::AngleAxis<real>(angle1, kRotationAxis));
+  const quat orientation2(Eigen::AngleAxis<real>(angle2, kRotationAxis));
+  const int64_t t1_ns = 0;           //< First sample time stamp
+  const int64_t t2_ns = 10;          //< The second sample time stamp
+  const int64_t eval_left_ns = 23;   //< The eval time for left
+  const int64_t eval_right_ns = 31;  //< The eval time for right
+  Pose extrapolated_pose;
+
+  predictor.Add(Pose{
+      .position = position1, .orientation = orientation1, .time_ns = t1_ns});
+
+  predictor.Add(Pose{
+      .position = position2, .orientation = orientation2, .time_ns = t2_ns});
+
+  // Extrapolate from t1 - t2 to eval_[left/right].
+  extrapolated_pose = predictor.Predict(eval_left_ns);
+
+  // The interpolation factors for left and right.
+  const auto left_t = (eval_left_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+  EXPECT_EQ(2.3, left_t);
+
+  TestPosition(lerp(position1, position2, left_t), extrapolated_pose.position);
+
+  TestOrientation(qlerp(angle1, angle2, left_t), extrapolated_pose.orientation);
+
+  extrapolated_pose = predictor.Predict(eval_right_ns);
+
+  const auto right_t =
+      (eval_right_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+  EXPECT_EQ(3.1, right_t);
+
+  TestPosition(lerp(position1, position2, right_t), extrapolated_pose.position);
+
+  TestOrientation(qlerp(angle1, angle2, right_t),
+                  extrapolated_pose.orientation);
+}
+
+// Test three samples, where the last two samples have the same timestamp.
+TEST(LinearPosePredictorTest, DuplicateSamples) {
+  LinearPosePredictor predictor;
+
+  const vec3 position1(0, 0, 0);
+  const vec3 position2(1, 2, 3);
+  const vec3 position3(2, 2, 3);
+  const real angle1 = M_PI * 0.3;
+  const real angle2 = M_PI * 0.5;
+  const real angle3 = M_PI * 0.65;
+  const quat orientation1(Eigen::AngleAxis<real>(angle1, kRotationAxis));
+  const quat orientation2(Eigen::AngleAxis<real>(angle2, kRotationAxis));
+  const quat orientation3(Eigen::AngleAxis<real>(angle3, kRotationAxis));
+  const int64_t t1_ns = 0;
+  const int64_t t2_ns = 10;
+  const int64_t eval_left_ns = 27;
+  const int64_t eval_right_ns = 31;
+  Pose extrapolated_pose;
+
+  predictor.Add(Pose{
+      .position = position1, .orientation = orientation1, .time_ns = t1_ns});
+
+  predictor.Add(Pose{
+      .position = position2, .orientation = orientation2, .time_ns = t2_ns});
+
+  {
+    // Extrapolate from t1 - t2 to eval_[left/right].
+    extrapolated_pose = predictor.Predict(eval_left_ns);
+
+    // The interpolation factors for left and right.
+    const auto left_t =
+        (eval_left_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+
+    // Test the result.
+    TestPosition(lerp(position1, position2, left_t),
+                 extrapolated_pose.position);
+
+    TestOrientation(qlerp(angle1, angle2, left_t),
+                    extrapolated_pose.orientation);
+
+    extrapolated_pose = predictor.Predict(eval_right_ns);
+
+    const auto right_t =
+        (eval_right_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+
+    TestPosition(lerp(position1, position2, right_t),
+                 extrapolated_pose.position);
+
+    TestOrientation(qlerp(angle1, angle2, right_t),
+                    extrapolated_pose.orientation);
+  }
+
+  // Sending a duplicate sample here.
+  predictor.Add(Pose{
+      .position = position3, .orientation = orientation3, .time_ns = t2_ns});
+
+  {
+    // Extrapolate from t1 - t2 to eval_[left/right].
+    extrapolated_pose = predictor.Predict(eval_left_ns);
+
+    // The interpolation factors for left and right.
+    const auto left_t =
+        (eval_left_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+
+    TestPosition(lerp(position1, position3, left_t),
+                 extrapolated_pose.position);
+
+    TestOrientation(qlerp(angle1, angle3, left_t),
+                    extrapolated_pose.orientation);
+
+    extrapolated_pose = predictor.Predict(eval_right_ns);
+
+    const auto right_t =
+        (eval_right_ns - t1_ns) / static_cast<real>(t2_ns - t1_ns);
+
+    // Test the result.
+
+    TestPosition(lerp(position1, position3, right_t),
+                 extrapolated_pose.position);
+
+    TestOrientation(qlerp(angle1, angle3, right_t),
+                    extrapolated_pose.orientation);
+  }
+}
+
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp b/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp
deleted file mode 100644
index 9722182..0000000
--- a/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <gtest/gtest.h>
-
-#include <private/dvr/polynomial_pose_predictor.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-// For comparing expected and actual.
-constexpr double kAbsErrorTolerance = 1e-5;
-
-// Test the linear extrapolation from two samples.
-TEST(PolynomialPosePredictor, Linear) {
-  DvrPoseAsync dummy;
-
-  // Degree = 1, simple line, passing through two points.
-  // Note the regularization is 0 so we expect the exact fit.
-  PolynomialPosePredictor<1, 2> predictor(0);
-
-  // Add two samples.
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
-      &dummy);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
-      &dummy);
-
-  DvrPoseAsync predicted_pose;
-
-  predictor.Predict(20, 30, &predicted_pose);
-
-  // Check the x,y,z components for the expected translation.
-  EXPECT_NEAR(predicted_pose.translation[0], 2, kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.translation[1], 4, kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.translation[2], 6, kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[0], 3, kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[1], 6, kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[2], 9, kAbsErrorTolerance);
-}
-
-// Test the degree two polynomial fit.
-TEST(PolynomialPosePredictor, Quadric) {
-  DvrPoseAsync dummy;
-
-  // Degree = 2, need three samples to fit a polynomial.
-  // Note the regularization is 0 so we expect the exact fit.
-  PolynomialPosePredictor<2, 3> predictor(0);
-
-  // Add three samples.
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
-      &dummy);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
-      &dummy);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 20},
-      &dummy);
-
-  // The expected polynomials for x/y/z.
-
-  // x:  0.01 * t^2 - 0.2 * t + 1
-  const auto x = [](auto t) { return 0.01 * t * t - 0.2 * t + 1; };
-
-  // y:  0.02 * t^2 - 0.4 * t + 2
-  const auto y = [](auto t) { return 0.02 * t * t - 0.4 * t + 2; };
-
-  // z:  0.03 * t^2 - 0.6 * t + 3
-  const auto z = [](auto t) { return 0.03 * t * t - 0.6 * t + 3; };
-
-  DvrPoseAsync predicted_pose;
-
-  predictor.Predict(40, 50, &predicted_pose);
-
-  // Check the x,y,z components for the expected translation.
-  EXPECT_NEAR(predicted_pose.translation[0], x(40), kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.translation[1], y(40), kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.translation[2], z(40), kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[0], x(50), kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[1], y(50), kAbsErrorTolerance);
-  EXPECT_NEAR(predicted_pose.right_translation[2], z(50), kAbsErrorTolerance);
-}
-
-// Test the degree two polynomial fit with degenerate input.
-//
-// The input samples all lie in a line which would normally make our system
-// degenerate. We will rely on the regularization term to recover the linear
-// solution in a quadric predictor.
-TEST(PolynomialPosePredictor, QuadricDegenate) {
-  DvrPoseAsync dummy;
-
-  // Degree = 2, need three samples to fit a polynomial.
-  // Note that we are using the default regularization term here.
-  // We cannot use 0 regularizer since the input is degenerate.
-  PolynomialPosePredictor<2, 3> predictor(1e-20);
-
-  // Add three samples.
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
-      &dummy);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
-      &dummy);
-
-  predictor.Add(
-      PosePredictor::Sample{
-          .position = {2, 4, 6}, .orientation = {0, 0, 0, 1}, .time_ns = 20},
-      &dummy);
-
-  DvrPoseAsync predicted_pose;
-
-  predictor.Predict(30, 40, &predicted_pose);
-
-  // Check the x,y,z components for the expected translation.
-  // We are using a higher error threshold since this is now approximate.
-  EXPECT_NEAR(predicted_pose.translation[0], 3, 0.001);
-  EXPECT_NEAR(predicted_pose.translation[1], 6, 0.001);
-  EXPECT_NEAR(predicted_pose.translation[2], 9, 0.001);
-  EXPECT_NEAR(predicted_pose.right_translation[0], 4, 0.001);
-  EXPECT_NEAR(predicted_pose.right_translation[1], 8, 0.001);
-  EXPECT_NEAR(predicted_pose.right_translation[2], 12, 0.001);
-}
-
-}  // namespace
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libposepredictor/polynomial_pose_predictor.cpp b/libs/vr/libposepredictor/polynomial_predictor.cpp
similarity index 63%
rename from libs/vr/libposepredictor/polynomial_pose_predictor.cpp
rename to libs/vr/libposepredictor/polynomial_predictor.cpp
index 47eab8a..98fd28a 100644
--- a/libs/vr/libposepredictor/polynomial_pose_predictor.cpp
+++ b/libs/vr/libposepredictor/polynomial_predictor.cpp
@@ -1,7 +1,6 @@
-#include <private/dvr/polynomial_pose_predictor.h>
+#include <polynomial_predictor.h>
 
-namespace android {
-namespace dvr {
+namespace posepredictor {
 
 // Instantiate the common polynomial types.
 template class PolynomialPosePredictor<1, 2>;
@@ -9,5 +8,4 @@
 template class PolynomialPosePredictor<3, 4>;
 template class PolynomialPosePredictor<4, 5>;
 
-}  // namespace dvr
-}  // namespace android
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/polynomial_predictor_tests.cpp b/libs/vr/libposepredictor/polynomial_predictor_tests.cpp
new file mode 100644
index 0000000..88cb2b9
--- /dev/null
+++ b/libs/vr/libposepredictor/polynomial_predictor_tests.cpp
@@ -0,0 +1,120 @@
+#include <gtest/gtest.h>
+
+#include <polynomial_predictor.h>
+
+namespace posepredictor {
+
+namespace {
+
+// For comparing expected and actual.
+constexpr real kAbsErrorTolerance = 1e-5;
+
+// Test the linear extrapolation from two samples.
+TEST(PolynomialPosePredictor, Linear) {
+  // Degree = 1, simple line, passing through two points.
+  // Note the regularization is 0 so we expect the exact fit.
+  PolynomialPosePredictor<1, 2> predictor(0);
+
+  // Add two samples.
+  predictor.Add(
+      Pose{.position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0});
+
+  predictor.Add(
+      Pose{.position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10});
+
+  Pose predicted_pose;
+
+  predicted_pose = predictor.Predict(20);
+
+  // Check the x,y,z components for the expected translation.
+  EXPECT_NEAR(predicted_pose.position[0], 2, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[1], 4, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[2], 6, kAbsErrorTolerance);
+
+  predicted_pose = predictor.Predict(30);
+  EXPECT_NEAR(predicted_pose.position[0], 3, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[1], 6, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[2], 9, kAbsErrorTolerance);
+}
+
+// Test the degree two polynomial fit.
+TEST(PolynomialPosePredictor, Quadric) {
+  // Degree = 2, need three samples to fit a polynomial.
+  // Note the regularization is 0 so we expect the exact fit.
+  PolynomialPosePredictor<2, 3> predictor(0);
+
+  // Add three samples.
+  predictor.Add(
+      Pose{.position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 0});
+
+  predictor.Add(
+      Pose{.position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 10});
+
+  predictor.Add(
+      Pose{.position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 20});
+
+  // The expected polynomials for x/y/z.
+
+  // x:  0.01 * t^2 - 0.2 * t + 1
+  const auto x = [](auto t) { return 0.01 * t * t - 0.2 * t + 1; };
+
+  // y:  0.02 * t^2 - 0.4 * t + 2
+  const auto y = [](auto t) { return 0.02 * t * t - 0.4 * t + 2; };
+
+  // z:  0.03 * t^2 - 0.6 * t + 3
+  const auto z = [](auto t) { return 0.03 * t * t - 0.6 * t + 3; };
+
+  Pose predicted_pose;
+  predicted_pose = predictor.Predict(40);
+
+  // Check the x,y,z components for the expected translation.
+  EXPECT_NEAR(predicted_pose.position[0], x(40), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[1], y(40), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[2], z(40), kAbsErrorTolerance);
+
+  predicted_pose = predictor.Predict(50);
+  EXPECT_NEAR(predicted_pose.position[0], x(50), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[1], y(50), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.position[2], z(50), kAbsErrorTolerance);
+}
+
+// Test the degree two polynomial fit with degenerate input.
+//
+// The input samples all lie on a line, which would normally make our system
+// degenerate. We rely on the regularization term to recover the linear
+// solution in a quadric predictor.
+TEST(PolynomialPosePredictor, QuadricDegenerate) {
+  // Degree = 2, need three samples to fit a polynomial.
+  // Note that we are using a tiny but nonzero regularization term here.
+  // We cannot use a 0 regularizer since the input is degenerate.
+  PolynomialPosePredictor<2, 3> predictor(1e-20);
+
+  // Add three samples.
+  predictor.Add(
+      Pose{.position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0});
+
+  predictor.Add(
+      Pose{.position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10});
+
+  predictor.Add(
+      Pose{.position = {2, 4, 6}, .orientation = {0, 0, 0, 1}, .time_ns = 20});
+
+  Pose predicted_pose;
+
+  predicted_pose = predictor.Predict(30);
+
+  // Check the x,y,z components for the expected translation.
+  // We are using a higher error threshold since this is now approximate.
+  EXPECT_NEAR(predicted_pose.position[0], 3, 0.001);
+  EXPECT_NEAR(predicted_pose.position[1], 6, 0.001);
+  EXPECT_NEAR(predicted_pose.position[2], 9, 0.001);
+
+  predicted_pose = predictor.Predict(40);
+  EXPECT_NEAR(predicted_pose.position[0], 4, 0.001);
+  EXPECT_NEAR(predicted_pose.position[1], 8, 0.001);
+  EXPECT_NEAR(predicted_pose.position[2], 12, 0.001);
+}
+
+}  // namespace
+
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/pose_predictor.cpp b/libs/vr/libposepredictor/pose_predictor.cpp
deleted file mode 100644
index b09a152..0000000
--- a/libs/vr/libposepredictor/pose_predictor.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-#include <private/dvr/pose_predictor.h>
-
-namespace android {
-namespace dvr {
-
-vec3d PosePredictor::AngularVelocity(const quatd& a, const quatd& b,
-                                     double delta_time) {
-  const auto delta_q = b.inverse() * a;
-  // Check that delta_q.w() == 1, Eigen doesn't respect this convention. If
-  // delta_q.w() == -1, we'll get the opposite velocity.
-  return 2.0 * (delta_q.w() < 0 ? -delta_q.vec() : delta_q.vec()) / delta_time;
-}
-
-void PosePredictor::InitializeFromSample(const Sample& sample,
-                                         DvrPoseAsync* out_pose,
-                                         const vec3d& velocity,
-                                         const vec3d& angular_velocity) {
-  out_pose->orientation = {static_cast<float>(sample.orientation.vec().x()),
-                           static_cast<float>(sample.orientation.vec().y()),
-                           static_cast<float>(sample.orientation.vec().z()),
-                           static_cast<float>(sample.orientation.w())};
-
-  out_pose->translation = {static_cast<float>(sample.position.x()),
-                           static_cast<float>(sample.position.y()),
-                           static_cast<float>(sample.position.z()), 0.0f};
-
-  out_pose->right_orientation = {
-      static_cast<float>(sample.orientation.vec().x()),
-      static_cast<float>(sample.orientation.vec().y()),
-      static_cast<float>(sample.orientation.vec().z()),
-      static_cast<float>(sample.orientation.w())};
-
-  out_pose->right_translation = {static_cast<float>(sample.position.x()),
-                                 static_cast<float>(sample.position.y()),
-                                 static_cast<float>(sample.position.z()), 0.0f};
-
-  out_pose->angular_velocity = {static_cast<float>(angular_velocity.x()),
-                                static_cast<float>(angular_velocity.y()),
-                                static_cast<float>(angular_velocity.z()), 0.0f};
-
-  out_pose->velocity = {static_cast<float>(velocity.x()),
-                        static_cast<float>(velocity.y()),
-                        static_cast<float>(velocity.z()), 0.0f};
-  out_pose->timestamp_ns = sample.time_ns;
-  out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
-  memset(out_pose->pad, 0, sizeof(out_pose->pad));
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libposepredictor/predictor.cpp b/libs/vr/libposepredictor/predictor.cpp
new file mode 100644
index 0000000..266e7ef
--- /dev/null
+++ b/libs/vr/libposepredictor/predictor.cpp
@@ -0,0 +1,34 @@
+#include <memory>
+
+#include <linear_predictor.h>
+#include <polynomial_predictor.h>
+#include <predictor.h>
+
+namespace posepredictor {
+
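+// Small-rotation identity used below: for a unit quaternion delta_q with
+// rotation angle theta about unit axis n, vec(delta_q) = sin(theta / 2) * n,
+// which is approximately (theta / 2) * n for small theta. Hence
+// 2 * vec(delta_q) / delta_time approximates the angular velocity vector.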
+vec3 Predictor::AngularVelocity(const quat& a, const quat& b, real delta_time) {
+  const auto delta_q = b.inverse() * a;
+  // Check that delta_q.w() == 1, Eigen doesn't respect this convention. If
+  // delta_q.w() == -1, we'll get the opposite velocity.
+  return 2.0 * (delta_q.w() < 0 ? -delta_q.vec() : delta_q.vec()) / delta_time;
+}
+
+Velocity Predictor::PredictVelocity(int64_t time_ns) const {
+  const auto a = Predict(time_ns - kFiniteDifferenceNs);
+  const auto b = Predict(time_ns + kFiniteDifferenceNs);
+  const auto delta_time = NsToSeconds(2 * kFiniteDifferenceNs);
+
+  return {(b.position - a.position) / delta_time,
+          AngularVelocity(a.orientation, b.orientation, delta_time)};
+}
+
+// The factory method.
+std::unique_ptr<Predictor> Predictor::Create(PredictorType type) {
+  switch (type) {
+    case PredictorType::Linear:
+      return std::make_unique<LinearPosePredictor>();
+    case PredictorType::Quadric:
+      return std::make_unique<QuadricPosePredictor>();
+    case PredictorType::Cubic:
+      return std::make_unique<CubicPosePredictor>();
+  }
+
+  // Not reachable for the enum values handled above, but keeps compilers that
+  // cannot prove the switch is exhaustive from warning about a missing return.
+  return nullptr;
+}
+
+}  // namespace posepredictor
diff --git a/libs/vr/libposepredictor/pose_predictor_tests.cpp b/libs/vr/libposepredictor/predictor_tests.cpp
similarity index 68%
rename from libs/vr/libposepredictor/pose_predictor_tests.cpp
rename to libs/vr/libposepredictor/predictor_tests.cpp
index 1e58b11..e84a93a 100644
--- a/libs/vr/libposepredictor/pose_predictor_tests.cpp
+++ b/libs/vr/libposepredictor/predictor_tests.cpp
@@ -1,36 +1,35 @@
 #include <gtest/gtest.h>
 
-#include <private/dvr/pose_predictor.h>
+#include <predictor.h>
 
-namespace android {
-namespace dvr {
+namespace posepredictor {
 
 namespace {
 
 // For comparing expected and actual.
-constexpr double kAbsErrorTolerance = 1e-4;
+constexpr real kAbsErrorTolerance = 1e-4;
 
 // Test the angular velocity computation from two orientations.
 TEST(PosePredictor, AngularVelocity) {
   // Some random rotation axis we will rotate around.
-  const vec3d kRotationAxis = vec3d(1, 2, 3).normalized();
+  const vec3 kRotationAxis = vec3(1, 2, 3).normalized();
 
   // Some random angle we will be rotating by.
-  const double kRotationAngle = M_PI / 30;
+  const real kRotationAngle = M_PI / 30;
 
   // Random start orientation we are currently at.
-  const quatd kStartOrientation = quatd(5, 3, 4, 1).normalized();
+  const quat kStartOrientation = quat(5, 3, 4, 1).normalized();
 
   // The orientation we will end up at.
-  const quatd kEndOrientation =
+  const quat kEndOrientation =
       kStartOrientation *
-      quatd(Eigen::AngleAxis<double>(kRotationAngle, kRotationAxis));
+      quat(Eigen::AngleAxis<real>(kRotationAngle, kRotationAxis));
 
   // The delta time for going from start orientation to end.
-  const float kDeltaTime = 1.0;
+  const real kDeltaTime = 1.0;
 
   // Compute the angular velocity from start orientation to end.
-  const auto angularVelocity = PosePredictor::AngularVelocity(
+  const auto angularVelocity = Predictor::AngularVelocity(
       kStartOrientation, kEndOrientation, kDeltaTime);
 
   // Extract the axis and the angular speed.
@@ -48,5 +47,4 @@
 
 }  // namespace
 
-}  // namespace dvr
-}  // namespace android
+}  // namespace posepredictor
diff --git a/services/surfaceflinger/Android.mk b/services/surfaceflinger/Android.mk
index ec47c8a..24c68ec 100644
--- a/services/surfaceflinger/Android.mk
+++ b/services/surfaceflinger/Android.mk
@@ -189,10 +189,6 @@
 
 LOCAL_INIT_RC := surfaceflinger.rc
 
-ifneq ($(ENABLE_CPUSETS),)
-    LOCAL_CFLAGS += -DENABLE_CPUSETS
-endif
-
 ifeq ($(TARGET_USES_HWC2),true)
     LOCAL_CFLAGS += -DUSE_HWC2
 endif
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.h b/services/surfaceflinger/DisplayHardware/HWC2.h
index e129a3a..4419dc1 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2.h
@@ -24,7 +24,7 @@
 #undef HWC2_USE_CPP11
 
 #include <ui/HdrCapabilities.h>
-#include <ui/mat4.h>
+#include <math/mat4.h>
 
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
diff --git a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
index 1d2c178..d72139e 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
@@ -34,33 +34,6 @@
 
 using namespace std::chrono_literals;
 
-static bool operator==(const hwc_color_t& lhs, const hwc_color_t& rhs) {
-    return lhs.r == rhs.r &&
-            lhs.g == rhs.g &&
-            lhs.b == rhs.b &&
-            lhs.a == rhs.a;
-}
-
-static bool operator==(const hwc_rect_t& lhs, const hwc_rect_t& rhs) {
-    return lhs.left == rhs.left &&
-            lhs.top == rhs.top &&
-            lhs.right == rhs.right &&
-            lhs.bottom == rhs.bottom;
-}
-
-static bool operator==(const hwc_frect_t& lhs, const hwc_frect_t& rhs) {
-    return lhs.left == rhs.left &&
-            lhs.top == rhs.top &&
-            lhs.right == rhs.right &&
-            lhs.bottom == rhs.bottom;
-}
-
-template <typename T>
-static inline bool operator!=(const T& lhs, const T& rhs)
-{
-    return !(lhs == rhs);
-}
-
 static uint8_t getMinorVersion(struct hwc_composer_device_1* device)
 {
     auto version = device->common.version & HARDWARE_API_VERSION_2_MAJ_MIN_MASK;
@@ -80,19 +53,6 @@
 
 namespace android {
 
-void HWC2On1Adapter::DisplayContentsDeleter::operator()(
-        hwc_display_contents_1_t* contents)
-{
-    if (contents != nullptr) {
-        for (size_t l = 0; l < contents->numHwLayers; ++l) {
-            auto& layer = contents->hwLayers[l];
-            std::free(const_cast<hwc_rect_t*>(layer.visibleRegionScreen.rects));
-            std::free(const_cast<hwc_rect_t*>(layer.surfaceDamage.rects));
-        }
-    }
-    std::free(contents);
-}
-
 class HWC2On1Adapter::Callbacks : public hwc_procs_t {
     public:
         explicit Callbacks(HWC2On1Adapter& adapter) : mAdapter(adapter) {
@@ -161,8 +121,7 @@
 }
 
 void HWC2On1Adapter::doGetCapabilities(uint32_t* outCount,
-        int32_t* outCapabilities)
-{
+        int32_t* outCapabilities) {
     if (outCapabilities == nullptr) {
         *outCount = mCapabilities.size();
         return;
@@ -179,8 +138,7 @@
 }
 
 hwc2_function_pointer_t HWC2On1Adapter::doGetFunction(
-        FunctionDescriptor descriptor)
-{
+        FunctionDescriptor descriptor) {
     switch (descriptor) {
         // Device functions
         case FunctionDescriptor::CreateVirtualDisplay:
@@ -350,8 +308,7 @@
 // Device functions
 
 Error HWC2On1Adapter::createVirtualDisplay(uint32_t width,
-        uint32_t height, hwc2_display_t* outDisplay)
-{
+        uint32_t height, hwc2_display_t* outDisplay) {
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
     if (mHwc1VirtualDisplay) {
@@ -381,8 +338,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::destroyVirtualDisplay(hwc2_display_t displayId)
-{
+Error HWC2On1Adapter::destroyVirtualDisplay(hwc2_display_t displayId) {
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
     if (!mHwc1VirtualDisplay || (mHwc1VirtualDisplay->getId() != displayId)) {
@@ -396,8 +352,7 @@
     return Error::None;
 }
 
-void HWC2On1Adapter::dump(uint32_t* outSize, char* outBuffer)
-{
+void HWC2On1Adapter::dump(uint32_t* outSize, char* outBuffer) {
     if (outBuffer != nullptr) {
         auto copiedBytes = mDumpString.copy(outBuffer, *outSize);
         *outSize = static_cast<uint32_t>(copiedBytes);
@@ -450,8 +405,7 @@
     *outSize = static_cast<uint32_t>(mDumpString.size());
 }
 
-uint32_t HWC2On1Adapter::getMaxVirtualDisplayCount()
-{
+uint32_t HWC2On1Adapter::getMaxVirtualDisplayCount() {
     return mHwc1SupportsVirtualDisplays ? 1 : 0;
 }
 
@@ -465,8 +419,7 @@
 }
 
 Error HWC2On1Adapter::registerCallback(Callback descriptor,
-        hwc2_callback_data_t callbackData, hwc2_function_pointer_t pointer)
-{
+        hwc2_callback_data_t callbackData, hwc2_function_pointer_t pointer) {
     if (!isValid(descriptor)) {
         return Error::BadParameter;
     }
@@ -553,11 +506,8 @@
 HWC2On1Adapter::Display::Display(HWC2On1Adapter& device, HWC2::DisplayType type)
   : mId(sNextId++),
     mDevice(device),
-    mDirtyCount(0),
     mStateMutex(),
-    mZIsDirty(false),
     mHwc1RequestedContents(nullptr),
-    mHwc1ReceivedContents(nullptr),
     mRetireFence(),
     mChanges(),
     mHwc1Id(-1),
@@ -572,10 +522,13 @@
     mOutputBuffer(),
     mHasColorTransform(false),
     mLayers(),
-    mHwc1LayerMap() {}
+    mHwc1LayerMap(),
+    mNumAvailableRects(0),
+    mNextAvailableRect(nullptr),
+    mGeometryChanged(false)
+    {}
 
-Error HWC2On1Adapter::Display::acceptChanges()
-{
+Error HWC2On1Adapter::Display::acceptChanges() {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!mChanges) {
@@ -594,25 +547,21 @@
 
     mChanges->clearTypeChanges();
 
-    mHwc1RequestedContents = std::move(mHwc1ReceivedContents);
-
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::createLayer(hwc2_layer_t* outLayerId)
-{
+Error HWC2On1Adapter::Display::createLayer(hwc2_layer_t* outLayerId) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     auto layer = *mLayers.emplace(std::make_shared<Layer>(*this));
     mDevice.mLayers.emplace(std::make_pair(layer->getId(), layer));
     *outLayerId = layer->getId();
     ALOGV("[%" PRIu64 "] created layer %" PRIu64, mId, *outLayerId);
-    mZIsDirty = true;
+    markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::destroyLayer(hwc2_layer_t layerId)
-{
+Error HWC2On1Adapter::Display::destroyLayer(hwc2_layer_t layerId) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     const auto mapLayer = mDevice.mLayers.find(layerId);
@@ -631,12 +580,11 @@
         }
     }
     ALOGV("[%" PRIu64 "] destroyed layer %" PRIu64, mId, layerId);
-    mZIsDirty = true;
+    markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::getActiveConfig(hwc2_config_t* outConfig)
-{
+Error HWC2On1Adapter::Display::getActiveConfig(hwc2_config_t* outConfig) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!mActiveConfig) {
@@ -651,8 +599,7 @@
 }
 
 Error HWC2On1Adapter::Display::getAttribute(hwc2_config_t configId,
-        Attribute attribute, int32_t* outValue)
-{
+        Attribute attribute, int32_t* outValue) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (configId > mConfigs.size() || !mConfigs[configId]->isOnDisplay(*this)) {
@@ -667,8 +614,7 @@
 }
 
 Error HWC2On1Adapter::Display::getChangedCompositionTypes(
-        uint32_t* outNumElements, hwc2_layer_t* outLayers, int32_t* outTypes)
-{
+        uint32_t* outNumElements, hwc2_layer_t* outLayers, int32_t* outTypes) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!mChanges) {
@@ -701,8 +647,7 @@
 }
 
 Error HWC2On1Adapter::Display::getColorModes(uint32_t* outNumModes,
-        int32_t* outModes)
-{
+        int32_t* outModes) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!outModes) {
@@ -717,8 +662,7 @@
 }
 
 Error HWC2On1Adapter::Display::getConfigs(uint32_t* outNumConfigs,
-        hwc2_config_t* outConfigs)
-{
+        hwc2_config_t* outConfigs) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!outConfigs) {
@@ -737,8 +681,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::getDozeSupport(int32_t* outSupport)
-{
+Error HWC2On1Adapter::Display::getDozeSupport(int32_t* outSupport) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (mDevice.mHwc1MinorVersion < 4 || mHwc1Id != 0) {
@@ -751,15 +694,13 @@
 
 Error HWC2On1Adapter::Display::getHdrCapabilities(uint32_t* outNumTypes,
         int32_t* /*outTypes*/, float* /*outMaxLuminance*/,
-        float* /*outMaxAverageLuminance*/, float* /*outMinLuminance*/)
-{
+        float* /*outMaxAverageLuminance*/, float* /*outMinLuminance*/) {
     // This isn't supported on HWC1, so per the HWC2 header, return numTypes = 0
     *outNumTypes = 0;
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::getName(uint32_t* outSize, char* outName)
-{
+Error HWC2On1Adapter::Display::getName(uint32_t* outSize, char* outName) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!outName) {
@@ -772,8 +713,7 @@
 }
 
 Error HWC2On1Adapter::Display::getReleaseFences(uint32_t* outNumElements,
-        hwc2_layer_t* outLayers, int32_t* outFences)
-{
+        hwc2_layer_t* outLayers, int32_t* outFences) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     uint32_t numWritten = 0;
@@ -799,8 +739,7 @@
 
 Error HWC2On1Adapter::Display::getRequests(int32_t* outDisplayRequests,
         uint32_t* outNumElements, hwc2_layer_t* outLayers,
-        int32_t* outLayerRequests)
-{
+        int32_t* outLayerRequests) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!mChanges) {
@@ -829,16 +768,14 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::getType(int32_t* outType)
-{
+Error HWC2On1Adapter::Display::getType(int32_t* outType) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     *outType = static_cast<int32_t>(mType);
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::present(int32_t* outRetireFence)
-{
+Error HWC2On1Adapter::Display::present(int32_t* outRetireFence) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (mChanges) {
@@ -857,8 +794,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::setActiveConfig(hwc2_config_t configId)
-{
+Error HWC2On1Adapter::Display::setActiveConfig(hwc2_config_t configId) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     auto config = getConfig(configId);
@@ -890,8 +826,7 @@
 }
 
 Error HWC2On1Adapter::Display::setClientTarget(buffer_handle_t target,
-        int32_t acquireFence, int32_t /*dataspace*/, hwc_region_t /*damage*/)
-{
+        int32_t acquireFence, int32_t /*dataspace*/, hwc_region_t /*damage*/) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     ALOGV("[%" PRIu64 "] setClientTarget(%p, %d)", mId, target, acquireFence);
@@ -901,8 +836,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::setColorMode(android_color_mode_t mode)
-{
+Error HWC2On1Adapter::Display::setColorMode(android_color_mode_t mode) {
     std::unique_lock<std::recursive_mutex> lock (mStateMutex);
 
     ALOGV("[%" PRIu64 "] setColorMode(%d)", mId, mode);
@@ -933,8 +867,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Display::setColorTransform(android_color_transform_t hint)
-{
+Error HWC2On1Adapter::Display::setColorTransform(android_color_transform_t hint) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     ALOGV("%" PRIu64 "] setColorTransform(%d)", mId,
@@ -944,8 +877,7 @@
 }
 
 Error HWC2On1Adapter::Display::setOutputBuffer(buffer_handle_t buffer,
-        int32_t releaseFence)
-{
+        int32_t releaseFence) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     ALOGV("[%" PRIu64 "] setOutputBuffer(%p, %d)", mId, buffer, releaseFence);
@@ -954,30 +886,25 @@
     return Error::None;
 }
 
-static bool isValid(PowerMode mode)
-{
+static bool isValid(PowerMode mode) {
     switch (mode) {
         case PowerMode::Off: // Fall-through
         case PowerMode::DozeSuspend: // Fall-through
         case PowerMode::Doze: // Fall-through
         case PowerMode::On: return true;
-        default: return false;
     }
 }
 
-static int getHwc1PowerMode(PowerMode mode)
-{
+static int getHwc1PowerMode(PowerMode mode) {
     switch (mode) {
         case PowerMode::Off: return HWC_POWER_MODE_OFF;
         case PowerMode::DozeSuspend: return HWC_POWER_MODE_DOZE_SUSPEND;
         case PowerMode::Doze: return HWC_POWER_MODE_DOZE;
         case PowerMode::On: return HWC_POWER_MODE_NORMAL;
-        default: return HWC_POWER_MODE_OFF;
     }
 }
 
-Error HWC2On1Adapter::Display::setPowerMode(PowerMode mode)
-{
+Error HWC2On1Adapter::Display::setPowerMode(PowerMode mode) {
     if (!isValid(mode)) {
         return Error::BadParameter;
     }
@@ -1007,12 +934,11 @@
     switch (enable) {
         case Vsync::Enable: // Fall-through
         case Vsync::Disable: return true;
-        default: return false;
+        case Vsync::Invalid: return false;
     }
 }
 
-Error HWC2On1Adapter::Display::setVsyncEnabled(Vsync enable)
-{
+Error HWC2On1Adapter::Display::setVsyncEnabled(Vsync enable) {
     if (!isValid(enable)) {
         return Error::BadParameter;
     }
@@ -1032,8 +958,7 @@
 }
 
 Error HWC2On1Adapter::Display::validate(uint32_t* outNumTypes,
-        uint32_t* outNumRequests)
-{
+        uint32_t* outNumRequests) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     ALOGV("[%" PRIu64 "] Entering validate", mId);
@@ -1042,6 +967,8 @@
         if (!mDevice.prepareAllDisplays()) {
             return Error::BadDisplay;
         }
+    } else {
+        ALOGE("Validate was called more than once!");
     }
 
     *outNumTypes = mChanges->getNumTypes();
@@ -1055,10 +982,7 @@
     return *outNumTypes > 0 ? Error::HasChanges : Error::None;
 }
 
-// Display helpers
-
-Error HWC2On1Adapter::Display::updateLayerZ(hwc2_layer_t layerId, uint32_t z)
-{
+Error HWC2On1Adapter::Display::updateLayerZ(hwc2_layer_t layerId, uint32_t z) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     const auto mapLayer = mDevice.mLayers.find(layerId);
@@ -1090,7 +1014,7 @@
 
     layer->setZ(z);
     mLayers.emplace(std::move(layer));
-    mZIsDirty = true;
+    markGeometryChanged();
 
     return Error::None;
 }
@@ -1159,8 +1083,7 @@
 static_assert(attributesMatch<HWC_DISPLAY_COLOR_TRANSFORM>(),
         "Tables out of sync");
 
-void HWC2On1Adapter::Display::populateConfigs()
-{
+void HWC2On1Adapter::Display::populateConfigs() {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     ALOGV("[%" PRIu64 "] populateConfigs", mId);
@@ -1238,8 +1161,7 @@
     populateColorModes();
 }
 
-void HWC2On1Adapter::Display::populateConfigs(uint32_t width, uint32_t height)
-{
+void HWC2On1Adapter::Display::populateConfigs(uint32_t width, uint32_t height) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     mConfigs.emplace_back(std::make_shared<Config>(*this));
@@ -1252,8 +1174,7 @@
     mActiveConfig = config;
 }
 
-bool HWC2On1Adapter::Display::prepare()
-{
+bool HWC2On1Adapter::Display::prepare() {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     // Only prepare display contents for displays HWC1 knows about
@@ -1270,86 +1191,45 @@
 
     ALOGV("[%" PRIu64 "] Entering prepare", mId);
 
-    auto currentCount = mHwc1RequestedContents ?
-            mHwc1RequestedContents->numHwLayers : 0;
-    auto requiredCount = mLayers.size() + 1;
-    ALOGV("[%" PRIu64 "]   Requires %zd layers, %zd allocated in %p", mId,
-            requiredCount, currentCount, mHwc1RequestedContents.get());
-
-    bool layerCountChanged = (currentCount != requiredCount);
-    if (layerCountChanged) {
-        reallocateHwc1Contents();
-    }
-
-    bool applyAllState = false;
-    if (layerCountChanged || mZIsDirty) {
-        assignHwc1LayerIds();
-        mZIsDirty = false;
-        applyAllState = true;
-    }
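+    // Rebuild the HWC1 contents buffer for the current layer set and assign
+    // fresh HWC1 layer indices before the per-layer state is applied below.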
+    allocateRequestedContents();
+    assignHwc1LayerIds();
 
     mHwc1RequestedContents->retireFenceFd = -1;
     mHwc1RequestedContents->flags = 0;
-    if (isDirty() || applyAllState) {
+    if (mGeometryChanged) {
         mHwc1RequestedContents->flags |= HWC_GEOMETRY_CHANGED;
     }
+    mHwc1RequestedContents->outbuf = mOutputBuffer.getBuffer();
+    mHwc1RequestedContents->outbufAcquireFenceFd = mOutputBuffer.getFence();
 
+    // +1 is for framebuffer target layer.
+    mHwc1RequestedContents->numHwLayers = mLayers.size() + 1;
     for (auto& layer : mLayers) {
         auto& hwc1Layer = mHwc1RequestedContents->hwLayers[layer->getHwc1Id()];
         hwc1Layer.releaseFenceFd = -1;
         hwc1Layer.acquireFenceFd = -1;
         ALOGV("Applying states for layer %" PRIu64 " ", layer->getId());
-        layer->applyState(hwc1Layer, applyAllState);
+        layer->applyState(hwc1Layer);
     }
 
-    mHwc1RequestedContents->outbuf = mOutputBuffer.getBuffer();
-    mHwc1RequestedContents->outbufAcquireFenceFd = mOutputBuffer.getFence();
-
     prepareFramebufferTarget();
 
+    resetGeometryMarker();
+
     return true;
 }
 
-static void cloneHWCRegion(hwc_region_t& region)
-{
-    auto size = sizeof(hwc_rect_t) * region.numRects;
-    auto newRects = static_cast<hwc_rect_t*>(std::malloc(size));
-    std::copy_n(region.rects, region.numRects, newRects);
-    region.rects = newRects;
-}
-
-HWC2On1Adapter::Display::HWC1Contents
-        HWC2On1Adapter::Display::cloneRequestedContents() const
-{
+void HWC2On1Adapter::Display::generateChanges() {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
-    size_t size = sizeof(hwc_display_contents_1_t) +
-            sizeof(hwc_layer_1_t) * (mHwc1RequestedContents->numHwLayers);
-    auto contents = static_cast<hwc_display_contents_1_t*>(std::malloc(size));
-    std::memcpy(contents, mHwc1RequestedContents.get(), size);
-    for (size_t layerId = 0; layerId < contents->numHwLayers; ++layerId) {
-        auto& layer = contents->hwLayers[layerId];
-        // Deep copy the regions to avoid double-frees
-        cloneHWCRegion(layer.visibleRegionScreen);
-        cloneHWCRegion(layer.surfaceDamage);
-    }
-    return HWC1Contents(contents);
-}
-
-void HWC2On1Adapter::Display::setReceivedContents(HWC1Contents contents)
-{
-    std::unique_lock<std::recursive_mutex> lock(mStateMutex);
-
-    mHwc1ReceivedContents = std::move(contents);
-
     mChanges.reset(new Changes);
 
-    size_t numLayers = mHwc1ReceivedContents->numHwLayers;
+    size_t numLayers = mHwc1RequestedContents->numHwLayers;
     for (size_t hwc1Id = 0; hwc1Id < numLayers; ++hwc1Id) {
-        const auto& receivedLayer = mHwc1ReceivedContents->hwLayers[hwc1Id];
+        const auto& receivedLayer = mHwc1RequestedContents->hwLayers[hwc1Id];
         if (mHwc1LayerMap.count(hwc1Id) == 0) {
             ALOGE_IF(receivedLayer.compositionType != HWC_FRAMEBUFFER_TARGET,
-                    "setReceivedContents: HWC1 layer %zd doesn't have a"
+                    "generateChanges: HWC1 layer %zd doesn't have a"
                     " matching HWC2 layer, and isn't the framebuffer target",
                     hwc1Id);
             continue;
@@ -1361,14 +1241,12 @@
     }
 }
 
-bool HWC2On1Adapter::Display::hasChanges() const
-{
+bool HWC2On1Adapter::Display::hasChanges() const {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
     return mChanges != nullptr;
 }
 
-Error HWC2On1Adapter::Display::set(hwc_display_contents_1& hwcContents)
-{
+Error HWC2On1Adapter::Display::set(hwc_display_contents_1& hwcContents) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     if (!mChanges || (mChanges->getNumTypes() > 0)) {
@@ -1404,15 +1282,13 @@
     return Error::None;
 }
 
-void HWC2On1Adapter::Display::addRetireFence(int fenceFd)
-{
+void HWC2On1Adapter::Display::addRetireFence(int fenceFd) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
     mRetireFence.add(fenceFd);
 }
 
 void HWC2On1Adapter::Display::addReleaseFences(
-        const hwc_display_contents_1_t& hwcContents)
-{
+        const hwc_display_contents_1_t& hwcContents) {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     size_t numLayers = hwcContents.numHwLayers;
@@ -1439,14 +1315,12 @@
     }
 }
 
-bool HWC2On1Adapter::Display::hasColorTransform() const
-{
+bool HWC2On1Adapter::Display::hasColorTransform() const {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
     return mHasColorTransform;
 }
 
-static std::string hwc1CompositionString(int32_t type)
-{
+static std::string hwc1CompositionString(int32_t type) {
     switch (type) {
         case HWC_FRAMEBUFFER: return "Framebuffer";
         case HWC_OVERLAY: return "Overlay";
@@ -1459,8 +1333,7 @@
     }
 }
 
-static std::string hwc1TransformString(int32_t transform)
-{
+static std::string hwc1TransformString(int32_t transform) {
     switch (transform) {
         case 0: return "None";
         case HWC_TRANSFORM_FLIP_H: return "FlipH";
@@ -1475,8 +1348,7 @@
     }
 }
 
-static std::string hwc1BlendModeString(int32_t mode)
-{
+static std::string hwc1BlendModeString(int32_t mode) {
     switch (mode) {
         case HWC_BLENDING_NONE: return "None";
         case HWC_BLENDING_PREMULT: return "Premultiplied";
@@ -1486,16 +1358,14 @@
     }
 }
 
-static std::string rectString(hwc_rect_t rect)
-{
+static std::string rectString(hwc_rect_t rect) {
     std::stringstream output;
     output << "[" << rect.left << ", " << rect.top << ", ";
     output << rect.right << ", " << rect.bottom << "]";
     return output.str();
 }
 
-static std::string approximateFloatString(float f)
-{
+static std::string approximateFloatString(float f) {
     if (static_cast<int32_t>(f) == f) {
         return std::to_string(static_cast<int32_t>(f));
     }
@@ -1508,8 +1378,7 @@
     return std::string(buffer, bytesWritten);
 }
 
-static std::string frectString(hwc_frect_t frect)
-{
+static std::string frectString(hwc_frect_t frect) {
     std::stringstream output;
     output << "[" << approximateFloatString(frect.left) << ", ";
     output << approximateFloatString(frect.top) << ", ";
@@ -1518,8 +1387,7 @@
     return output.str();
 }
 
-static std::string colorString(hwc_color_t color)
-{
+static std::string colorString(hwc_color_t color) {
     std::stringstream output;
     output << "RGBA [";
     output << static_cast<int32_t>(color.r) << ", ";
@@ -1529,8 +1397,7 @@
     return output.str();
 }
 
-static std::string alphaString(float f)
-{
+static std::string alphaString(float f) {
     const size_t BUFFER_SIZE = 8;
     char buffer[BUFFER_SIZE] = {};
     auto bytesWritten = snprintf(buffer, BUFFER_SIZE, "%.3f", f);
@@ -1538,8 +1405,7 @@
 }
 
 static std::string to_string(const hwc_layer_1_t& hwcLayer,
-        int32_t hwc1MinorVersion)
-{
+        int32_t hwc1MinorVersion) {
     const char* fill = "          ";
 
     std::stringstream output;
@@ -1599,8 +1465,7 @@
 }
 
 static std::string to_string(const hwc_display_contents_1_t& hwcContents,
-        int32_t hwc1MinorVersion)
-{
+        int32_t hwc1MinorVersion) {
     const char* fill = "      ";
 
     std::stringstream output;
@@ -1622,8 +1487,7 @@
     return output.str();
 }
 
-std::string HWC2On1Adapter::Display::dump() const
-{
+std::string HWC2On1Adapter::Display::dump() const {
     std::unique_lock<std::recursive_mutex> lock(mStateMutex);
 
     std::stringstream output;
@@ -1663,10 +1527,7 @@
         output << "    Output buffer: " << mOutputBuffer.getBuffer() << '\n';
     }
 
-    if (mHwc1ReceivedContents) {
-        output << "    Last received HWC1 state\n";
-        output << to_string(*mHwc1ReceivedContents, mDevice.mHwc1MinorVersion);
-    } else if (mHwc1RequestedContents) {
+    if (mHwc1RequestedContents) {
         output << "    Last requested HWC1 state\n";
         output << to_string(*mHwc1RequestedContents, mDevice.mHwc1MinorVersion);
     }
@@ -1674,28 +1535,46 @@
     return output.str();
 }
 
+hwc_rect_t* HWC2On1Adapter::Display::GetRects(size_t numRects) {
+    if (numRects == 0) {
+        return nullptr;
+    }
+
+    if (numRects > mNumAvailableRects) {
+        // This should NEVER happen since we calculated how many rects the
+        // display would need.
+        ALOGE("Rect allocation failure! SF is likely to crash soon!");
+        return nullptr;
+    }
+    hwc_rect_t* rects = mNextAvailableRect;
+    mNextAvailableRect += numRects;
+    mNumAvailableRects -= numRects;
+    return rects;
+}
+
+hwc_display_contents_1* HWC2On1Adapter::Display::getDisplayContents() {
+    return mHwc1RequestedContents.get();
+}
+
 void HWC2On1Adapter::Display::Config::setAttribute(HWC2::Attribute attribute,
-        int32_t value)
-{
+        int32_t value) {
     mAttributes[attribute] = value;
 }
 
-int32_t HWC2On1Adapter::Display::Config::getAttribute(Attribute attribute) const
-{
+int32_t HWC2On1Adapter::Display::Config::getAttribute(Attribute attribute) const {
     if (mAttributes.count(attribute) == 0) {
         return -1;
     }
     return mAttributes.at(attribute);
 }
 
-void HWC2On1Adapter::Display::Config::setHwc1Id(uint32_t id)
-{
+void HWC2On1Adapter::Display::Config::setHwc1Id(uint32_t id) {
     android_color_mode_t colorMode = static_cast<android_color_mode_t>(getAttribute(ColorMode));
     mHwc1Ids.emplace(colorMode, id);
 }
 
-bool HWC2On1Adapter::Display::Config::hasHwc1Id(uint32_t id) const
-{
+bool HWC2On1Adapter::Display::Config::hasHwc1Id(uint32_t id) const {
     for (const auto& idPair : mHwc1Ids) {
         if (id == idPair.second) {
             return true;
@@ -1705,8 +1584,7 @@
 }
 
 Error HWC2On1Adapter::Display::Config::getColorModeForHwc1Id(
-        uint32_t id, android_color_mode_t* outMode) const
-{
+        uint32_t id, android_color_mode_t* outMode) const {
     for (const auto& idPair : mHwc1Ids) {
         if (id == idPair.second) {
             *outMode = idPair.first;
@@ -1718,8 +1596,7 @@
 }
 
 Error HWC2On1Adapter::Display::Config::getHwc1IdForColorMode(android_color_mode_t mode,
-        uint32_t* outId) const
-{
+        uint32_t* outId) const {
     for (const auto& idPair : mHwc1Ids) {
         if (mode == idPair.first) {
             *outId = idPair.second;
@@ -1730,8 +1607,7 @@
     return Error::BadParameter;
 }
 
-bool HWC2On1Adapter::Display::Config::merge(const Config& other)
-{
+bool HWC2On1Adapter::Display::Config::merge(const Config& other) {
     auto attributes = {HWC2::Attribute::Width, HWC2::Attribute::Height,
             HWC2::Attribute::VsyncPeriod, HWC2::Attribute::DpiX,
             HWC2::Attribute::DpiY};
@@ -1753,8 +1629,7 @@
     return true;
 }
 
-std::set<android_color_mode_t> HWC2On1Adapter::Display::Config::getColorModes() const
-{
+std::set<android_color_mode_t> HWC2On1Adapter::Display::Config::getColorModes() const {
     std::set<android_color_mode_t> colorModes;
     for (const auto& idPair : mHwc1Ids) {
         colorModes.emplace(idPair.first);
@@ -1762,8 +1637,7 @@
     return colorModes;
 }
 
-std::string HWC2On1Adapter::Display::Config::toString(bool splitLine) const
-{
+std::string HWC2On1Adapter::Display::Config::toString(bool splitLine) const {
     std::string output;
 
     const size_t BUFFER_SIZE = 100;
@@ -1819,16 +1693,14 @@
 }
 
 std::shared_ptr<const HWC2On1Adapter::Display::Config>
-        HWC2On1Adapter::Display::getConfig(hwc2_config_t configId) const
-{
+        HWC2On1Adapter::Display::getConfig(hwc2_config_t configId) const {
     if (configId > mConfigs.size() || !mConfigs[configId]->isOnDisplay(*this)) {
         return nullptr;
     }
     return mConfigs[configId];
 }
 
-void HWC2On1Adapter::Display::populateColorModes()
-{
+void HWC2On1Adapter::Display::populateColorModes() {
     mColorModes = mConfigs[0]->getColorModes();
     for (const auto& config : mConfigs) {
         std::set<android_color_mode_t> intersection;
@@ -1840,8 +1712,7 @@
     }
 }
 
-void HWC2On1Adapter::Display::initializeActiveConfig()
-{
+void HWC2On1Adapter::Display::initializeActiveConfig() {
     if (mDevice.mHwc1Device->getActiveConfig == nullptr) {
         ALOGV("getActiveConfig is null, choosing config 0");
         mActiveConfig = mConfigs[0];
@@ -1886,22 +1757,40 @@
 
 }
 
-void HWC2On1Adapter::Display::reallocateHwc1Contents()
-{
-    // Allocate an additional layer for the framebuffer target
+void HWC2On1Adapter::Display::allocateRequestedContents() {
+    // What needs to be allocated:
+    // 1 hwc_display_contents_1_t
+    // 1 hwc_layer_1_t for each layer
+    // 1 hwc_rect_t for each rect in each layer's surfaceDamage
+    // 1 hwc_rect_t for each rect in each layer's visibleRegion
+    // 1 hwc_layer_1_t for the framebuffer target
+    // 1 hwc_rect_t for the framebuffer target's visibleRegion
+
+    // Count # of surfaceDamage rects
+    size_t numSurfaceDamages = 0;
+    for (const auto& layer : mLayers) {
+        numSurfaceDamages += layer->getNumSurfaceDamages();
+    }
+
+    // Count # of visibleRegion rects (start at 1 for the mandatory
+    // framebuffer target region)
+    size_t numVisibleRegion = 1;
+    for (const auto& layer : mLayers) {
+        numVisibleRegion += layer->getNumVisibleRegions();
+    }
+
+    size_t numRects = numVisibleRegion + numSurfaceDamages;
     auto numLayers = mLayers.size() + 1;
     size_t size = sizeof(hwc_display_contents_1_t) +
-            sizeof(hwc_layer_1_t) * numLayers;
-    ALOGV("[%" PRIu64 "] reallocateHwc1Contents creating %zd layer%s", mId,
-            numLayers, numLayers != 1 ? "s" : "");
-    auto contents =
-            static_cast<hwc_display_contents_1_t*>(std::calloc(size, 1));
-    contents->numHwLayers = numLayers;
+            sizeof(hwc_layer_1_t) * numLayers +
+            sizeof(hwc_rect_t) * numRects;
+    auto contents = static_cast<hwc_display_contents_1_t*>(std::calloc(size, 1));
     mHwc1RequestedContents.reset(contents);
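+    // The rect pool lives in the same allocation, immediately after the layer
+    // array; GetRects() hands out slices of it during prepare().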
+    mNextAvailableRect = reinterpret_cast<hwc_rect_t*>(&contents->hwLayers[numLayers]);
+    mNumAvailableRects = numRects;
 }
 
-void HWC2On1Adapter::Display::assignHwc1LayerIds()
-{
+void HWC2On1Adapter::Display::assignHwc1LayerIds() {
     mHwc1LayerMap.clear();
     size_t nextHwc1Id = 0;
     for (auto& layer : mLayers) {
@@ -1911,8 +1800,7 @@
 }
 
 void HWC2On1Adapter::Display::updateTypeChanges(const hwc_layer_1_t& hwc1Layer,
-        const Layer& layer)
-{
+        const Layer& layer) {
     auto layerId = layer.getId();
     switch (hwc1Layer.compositionType) {
         case HWC_FRAMEBUFFER:
@@ -1947,16 +1835,14 @@
 }
 
 void HWC2On1Adapter::Display::updateLayerRequests(
-        const hwc_layer_1_t& hwc1Layer, const Layer& layer)
-{
+        const hwc_layer_1_t& hwc1Layer, const Layer& layer) {
     if ((hwc1Layer.hints & HWC_HINT_CLEAR_FB) != 0) {
         mChanges->addLayerRequest(layer.getId(),
                 LayerRequest::ClearClientTarget);
     }
 }
 
-void HWC2On1Adapter::Display::prepareFramebufferTarget()
-{
+void HWC2On1Adapter::Display::prepareFramebufferTarget() {
     // We check that mActiveConfig is valid in Display::prepare
     int32_t width = mActiveConfig->getAttribute(Attribute::Width);
     int32_t height = mActiveConfig->getAttribute(Attribute::Height);
@@ -1976,8 +1862,9 @@
     }
     hwc1Target.displayFrame = {0, 0, width, height};
     hwc1Target.planeAlpha = 255;
+
     hwc1Target.visibleRegionScreen.numRects = 1;
-    auto rects = static_cast<hwc_rect_t*>(std::malloc(sizeof(hwc_rect_t)));
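+    // The framebuffer target's single visible rect also comes from the
+    // display's pre-allocated rect pool.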
+    hwc_rect_t* rects = GetRects(1);
     rects[0].left = 0;
     rects[0].top = 0;
     rects[0].right = width;
@@ -1995,42 +1882,37 @@
 HWC2On1Adapter::Layer::Layer(Display& display)
   : mId(sNextId++),
     mDisplay(display),
-    mDirtyCount(0),
     mBuffer(),
     mSurfaceDamage(),
-    mBlendMode(*this, BlendMode::None),
-    mColor(*this, {0, 0, 0, 0}),
-    mCompositionType(*this, Composition::Invalid),
-    mDisplayFrame(*this, {0, 0, -1, -1}),
-    mPlaneAlpha(*this, 0.0f),
-    mSidebandStream(*this, nullptr),
-    mSourceCrop(*this, {0.0f, 0.0f, -1.0f, -1.0f}),
-    mTransform(*this, Transform::None),
-    mVisibleRegion(*this, std::vector<hwc_rect_t>()),
+    mBlendMode(BlendMode::None),
+    mColor({0, 0, 0, 0}),
+    mCompositionType(Composition::Invalid),
+    mDisplayFrame({0, 0, -1, -1}),
+    mPlaneAlpha(0.0f),
+    mSidebandStream(nullptr),
+    mSourceCrop({0.0f, 0.0f, -1.0f, -1.0f}),
+    mTransform(Transform::None),
+    mVisibleRegion(),
     mZ(0),
     mReleaseFence(),
     mHwc1Id(0),
-    mHasUnsupportedPlaneAlpha(false),
-    mHasUnsupportedBackgroundColor(false) {}
+    mHasUnsupportedPlaneAlpha(false) {}
 
 bool HWC2On1Adapter::SortLayersByZ::operator()(
-        const std::shared_ptr<Layer>& lhs, const std::shared_ptr<Layer>& rhs)
-{
+        const std::shared_ptr<Layer>& lhs, const std::shared_ptr<Layer>& rhs) {
     return lhs->getZ() < rhs->getZ();
 }
 
 Error HWC2On1Adapter::Layer::setBuffer(buffer_handle_t buffer,
-        int32_t acquireFence)
-{
+        int32_t acquireFence) {
     ALOGV("Setting acquireFence to %d for layer %" PRIu64, acquireFence, mId);
     mBuffer.setBuffer(buffer);
     mBuffer.setFence(acquireFence);
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setCursorPosition(int32_t x, int32_t y)
-{
-    if (mCompositionType.getValue() != Composition::Cursor) {
+Error HWC2On1Adapter::Layer::setCursorPosition(int32_t x, int32_t y) {
+    if (mCompositionType != Composition::Cursor) {
         return Error::BadLayer;
     }
 
@@ -2044,8 +1926,7 @@
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setSurfaceDamage(hwc_region_t damage)
-{
+Error HWC2On1Adapter::Layer::setSurfaceDamage(hwc_region_t damage) {
     // HWC1 supports surface damage starting only with version 1.5.
     if (mDisplay.getDevice().mHwc1MinorVersion < 5) {
         return Error::None;
@@ -2057,104 +1938,91 @@
 
 // Layer state functions
 
-Error HWC2On1Adapter::Layer::setBlendMode(BlendMode mode)
-{
-    mBlendMode.setPending(mode);
+Error HWC2On1Adapter::Layer::setBlendMode(BlendMode mode) {
+    mBlendMode = mode;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setColor(hwc_color_t color)
-{
-    mColor.setPending(color);
+Error HWC2On1Adapter::Layer::setColor(hwc_color_t color) {
+    mColor = color;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setCompositionType(Composition type)
-{
-    mCompositionType.setPending(type);
+Error HWC2On1Adapter::Layer::setCompositionType(Composition type) {
+    mCompositionType = type;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setDataspace(android_dataspace_t)
-{
+Error HWC2On1Adapter::Layer::setDataspace(android_dataspace_t) {
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setDisplayFrame(hwc_rect_t frame)
-{
-    mDisplayFrame.setPending(frame);
+Error HWC2On1Adapter::Layer::setDisplayFrame(hwc_rect_t frame) {
+    mDisplayFrame = frame;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setPlaneAlpha(float alpha)
-{
-    mPlaneAlpha.setPending(alpha);
+Error HWC2On1Adapter::Layer::setPlaneAlpha(float alpha) {
+    mPlaneAlpha = alpha;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setSidebandStream(const native_handle_t* stream)
-{
-    mSidebandStream.setPending(stream);
+Error HWC2On1Adapter::Layer::setSidebandStream(const native_handle_t* stream) {
+    mSidebandStream = stream;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setSourceCrop(hwc_frect_t crop)
-{
-    mSourceCrop.setPending(crop);
+Error HWC2On1Adapter::Layer::setSourceCrop(hwc_frect_t crop) {
+    mSourceCrop = crop;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setTransform(Transform transform)
-{
-    mTransform.setPending(transform);
+Error HWC2On1Adapter::Layer::setTransform(Transform transform) {
+    mTransform = transform;
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setVisibleRegion(hwc_region_t rawVisible)
-{
-    std::vector<hwc_rect_t> visible(rawVisible.rects,
-            rawVisible.rects + rawVisible.numRects);
-    mVisibleRegion.setPending(std::move(visible));
+Error HWC2On1Adapter::Layer::setVisibleRegion(hwc_region_t visible) {
+    mVisibleRegion.resize(visible.numRects);
+    std::copy_n(visible.rects, visible.numRects, mVisibleRegion.begin());
+    mDisplay.markGeometryChanged();
     return Error::None;
 }
 
-Error HWC2On1Adapter::Layer::setZ(uint32_t z)
-{
+Error HWC2On1Adapter::Layer::setZ(uint32_t z) {
     mZ = z;
     return Error::None;
 }
 
-void HWC2On1Adapter::Layer::addReleaseFence(int fenceFd)
-{
+void HWC2On1Adapter::Layer::addReleaseFence(int fenceFd) {
     ALOGV("addReleaseFence %d to layer %" PRIu64, fenceFd, mId);
     mReleaseFence.add(fenceFd);
 }
 
-const sp<Fence>& HWC2On1Adapter::Layer::getReleaseFence() const
-{
+const sp<Fence>& HWC2On1Adapter::Layer::getReleaseFence() const {
     return mReleaseFence.get();
 }
 
-void HWC2On1Adapter::Layer::applyState(hwc_layer_1_t& hwc1Layer,
-        bool applyAllState)
-{
-    applyCommonState(hwc1Layer, applyAllState);
-    auto compositionType = mCompositionType.getPendingValue();
-    if (compositionType == Composition::SolidColor) {
-        applySolidColorState(hwc1Layer, applyAllState);
-    } else if (compositionType == Composition::Sideband) {
-        applySidebandState(hwc1Layer, applyAllState);
-    } else {
-        applyBufferState(hwc1Layer);
+void HWC2On1Adapter::Layer::applyState(hwc_layer_1_t& hwc1Layer) {
+    applyCommonState(hwc1Layer);
+    applyCompositionType(hwc1Layer);
+    switch (mCompositionType) {
+        case Composition::SolidColor: applySolidColorState(hwc1Layer); break;
+        case Composition::Sideband: applySidebandState(hwc1Layer); break;
+        default: applyBufferState(hwc1Layer); break;
     }
-    applyCompositionType(hwc1Layer, applyAllState);
 }
 
-// Layer dump helpers
-
 static std::string regionStrings(const std::vector<hwc_rect_t>& visibleRegion,
-        const std::vector<hwc_rect_t>& surfaceDamage)
-{
+        const std::vector<hwc_rect_t>& surfaceDamage) {
     std::string regions;
     regions += "        Visible Region";
     regions.resize(40, ' ');
@@ -2182,40 +2050,38 @@
     return regions;
 }
 
-std::string HWC2On1Adapter::Layer::dump() const
-{
+std::string HWC2On1Adapter::Layer::dump() const {
     std::stringstream output;
     const char* fill = "      ";
 
-    output << fill << to_string(mCompositionType.getPendingValue());
+    output << fill << to_string(mCompositionType);
     output << " Layer  HWC2/1: " << mId << "/" << mHwc1Id << "  ";
     output << "Z: " << mZ;
-    if (mCompositionType.getValue() == HWC2::Composition::SolidColor) {
-        output << "  " << colorString(mColor.getValue());
-    } else if (mCompositionType.getValue() == HWC2::Composition::Sideband) {
-        output << "  Handle: " << mSidebandStream.getValue() << '\n';
+    if (mCompositionType == HWC2::Composition::SolidColor) {
+        output << "  " << colorString(mColor);
+    } else if (mCompositionType == HWC2::Composition::Sideband) {
+        output << "  Handle: " << mSidebandStream << '\n';
     } else {
         output << "  Buffer: " << mBuffer.getBuffer() << "/" <<
                 mBuffer.getFence() << '\n';
         output << fill << "  Display frame [LTRB]: " <<
-                rectString(mDisplayFrame.getValue()) << '\n';
+                rectString(mDisplayFrame) << '\n';
         output << fill << "  Source crop: " <<
-                frectString(mSourceCrop.getValue()) << '\n';
-        output << fill << "  Transform: " << to_string(mTransform.getValue());
-        output << "  Blend mode: " << to_string(mBlendMode.getValue());
-        if (mPlaneAlpha.getValue() != 1.0f) {
+                frectString(mSourceCrop) << '\n';
+        output << fill << "  Transform: " << to_string(mTransform);
+        output << "  Blend mode: " << to_string(mBlendMode);
+        if (mPlaneAlpha != 1.0f) {
             output << "  Alpha: " <<
-                alphaString(mPlaneAlpha.getValue()) << '\n';
+                alphaString(mPlaneAlpha) << '\n';
         } else {
             output << '\n';
         }
-        output << regionStrings(mVisibleRegion.getValue(), mSurfaceDamage);
+        output << regionStrings(mVisibleRegion, mSurfaceDamage);
     }
     return output.str();
 }
 
-static int getHwc1Blending(HWC2::BlendMode blendMode)
-{
+static int getHwc1Blending(HWC2::BlendMode blendMode) {
     switch (blendMode) {
         case BlendMode::Coverage: return HWC_BLENDING_COVERAGE;
         case BlendMode::Premultiplied: return HWC_BLENDING_PREMULT;
@@ -2223,168 +2089,124 @@
     }
 }
 
-void HWC2On1Adapter::Layer::applyCommonState(hwc_layer_1_t& hwc1Layer,
-        bool applyAllState)
-{
+void HWC2On1Adapter::Layer::applyCommonState(hwc_layer_1_t& hwc1Layer) {
     auto minorVersion = mDisplay.getDevice().getHwc1MinorVersion();
-    if (applyAllState || mBlendMode.isDirty()) {
-        hwc1Layer.blending = getHwc1Blending(mBlendMode.getPendingValue());
-        mBlendMode.latch();
-    }
-    if (applyAllState || mDisplayFrame.isDirty()) {
-        hwc1Layer.displayFrame = mDisplayFrame.getPendingValue();
-        mDisplayFrame.latch();
-    }
-    if (applyAllState || mPlaneAlpha.isDirty()) {
-        auto pendingAlpha = mPlaneAlpha.getPendingValue();
-        if (minorVersion < 2) {
-            mHasUnsupportedPlaneAlpha = pendingAlpha < 1.0f;
-        } else {
-            hwc1Layer.planeAlpha =
-                    static_cast<uint8_t>(255.0f * pendingAlpha + 0.5f);
-        }
-        mPlaneAlpha.latch();
-    }
-    if (applyAllState || mSourceCrop.isDirty()) {
-        if (minorVersion < 3) {
-            auto pending = mSourceCrop.getPendingValue();
-            hwc1Layer.sourceCropi.left =
-                    static_cast<int32_t>(std::ceil(pending.left));
-            hwc1Layer.sourceCropi.top =
-                    static_cast<int32_t>(std::ceil(pending.top));
-            hwc1Layer.sourceCropi.right =
-                    static_cast<int32_t>(std::floor(pending.right));
-            hwc1Layer.sourceCropi.bottom =
-                    static_cast<int32_t>(std::floor(pending.bottom));
-        } else {
-            hwc1Layer.sourceCropf = mSourceCrop.getPendingValue();
-        }
-        mSourceCrop.latch();
-    }
-    if (applyAllState || mTransform.isDirty()) {
-        hwc1Layer.transform =
-                static_cast<uint32_t>(mTransform.getPendingValue());
-        mTransform.latch();
-    }
-    if (applyAllState || mVisibleRegion.isDirty()) {
-        auto& hwc1VisibleRegion = hwc1Layer.visibleRegionScreen;
+    hwc1Layer.blending = getHwc1Blending(mBlendMode);
+    hwc1Layer.displayFrame = mDisplayFrame;
 
-        std::free(const_cast<hwc_rect_t*>(hwc1VisibleRegion.rects));
+    auto pendingAlpha = mPlaneAlpha;
+    if (minorVersion < 2) {
+        mHasUnsupportedPlaneAlpha = pendingAlpha < 1.0f;
+    } else {
+        hwc1Layer.planeAlpha =
+                static_cast<uint8_t>(255.0f * pendingAlpha + 0.5f);
+    }
 
-        auto pending = mVisibleRegion.getPendingValue();
-        hwc_rect_t* newRects = static_cast<hwc_rect_t*>(
-                std::malloc(sizeof(hwc_rect_t) * pending.size()));
-        std::copy(pending.begin(), pending.end(), newRects);
-        hwc1VisibleRegion.rects = const_cast<const hwc_rect_t*>(newRects);
-        hwc1VisibleRegion.numRects = pending.size();
-        mVisibleRegion.latch();
+    if (minorVersion < 3) {
+        auto pending = mSourceCrop;
+        hwc1Layer.sourceCropi.left =
+                static_cast<int32_t>(std::ceil(pending.left));
+        hwc1Layer.sourceCropi.top =
+                static_cast<int32_t>(std::ceil(pending.top));
+        hwc1Layer.sourceCropi.right =
+                static_cast<int32_t>(std::floor(pending.right));
+        hwc1Layer.sourceCropi.bottom =
+                static_cast<int32_t>(std::floor(pending.bottom));
+    } else {
+        hwc1Layer.sourceCropf = mSourceCrop;
+    }
+
+    hwc1Layer.transform = static_cast<uint32_t>(mTransform);
+
+    auto& hwc1VisibleRegion = hwc1Layer.visibleRegionScreen;
+    hwc1VisibleRegion.numRects = mVisibleRegion.size();
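+    // Visible-region rects are drawn from the display's pre-allocated pool,
+    // so no per-layer allocation or free is needed here.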
+    hwc_rect_t* rects = mDisplay.GetRects(hwc1VisibleRegion.numRects);
+    hwc1VisibleRegion.rects = rects;
+    for (size_t i = 0; i < mVisibleRegion.size(); i++) {
+        rects[i] = mVisibleRegion[i];
     }
 }
 
-void HWC2On1Adapter::Layer::applySolidColorState(hwc_layer_1_t& hwc1Layer,
-        bool applyAllState)
-{
-    if (applyAllState || mColor.isDirty()) {
-        // If the device does not support background color it is likely to make
-        // assumption regarding backgroundColor and handle (both fields occupy
-        // the same location in hwc_layer_1_t union).
-        // To not confuse these devices we don't set background color and we
-        // make sure handle is a null pointer.
-        if (mDisplay.getDevice().supportsBackgroundColor()) {
-            hwc1Layer.backgroundColor = mColor.getPendingValue();
-            mHasUnsupportedBackgroundColor = false;
-        } else {
-            hwc1Layer.handle = nullptr;
-            mHasUnsupportedBackgroundColor = true;
-        }
-        mColor.latch();
+void HWC2On1Adapter::Layer::applySolidColorState(hwc_layer_1_t& hwc1Layer) {
+    // If the device does not support background color it is likely to make
+    // assumptions regarding backgroundColor and handle (both fields occupy
+    // the same location in the hwc_layer_1_t union).
+    // To avoid confusing these devices we don't set the background color and
+    // we make sure handle is a null pointer.
+    if (hasUnsupportedBackgroundColor()) {
+        hwc1Layer.handle = nullptr;
+    } else {
+        hwc1Layer.backgroundColor = mColor;
     }
 }
 
-void HWC2On1Adapter::Layer::applySidebandState(hwc_layer_1_t& hwc1Layer,
-        bool applyAllState)
-{
-    if (applyAllState || mSidebandStream.isDirty()) {
-        hwc1Layer.sidebandStream = mSidebandStream.getPendingValue();
-        mSidebandStream.latch();
-    }
+void HWC2On1Adapter::Layer::applySidebandState(hwc_layer_1_t& hwc1Layer) {
+    hwc1Layer.sidebandStream = mSidebandStream;
 }
 
-void HWC2On1Adapter::Layer::applyBufferState(hwc_layer_1_t& hwc1Layer)
-{
+void HWC2On1Adapter::Layer::applyBufferState(hwc_layer_1_t& hwc1Layer) {
     hwc1Layer.handle = mBuffer.getBuffer();
     hwc1Layer.acquireFenceFd = mBuffer.getFence();
 }
 
-void HWC2On1Adapter::Layer::applyCompositionType(hwc_layer_1_t& hwc1Layer,
-        bool applyAllState)
-{
+void HWC2On1Adapter::Layer::applyCompositionType(hwc_layer_1_t& hwc1Layer) {
     // HWC1 never supports color transforms or dataspaces and only sometimes
     // supports plane alpha (depending on the version). These require us to drop
     // some or all layers to client composition.
-    ALOGV("applyCompositionType");
-    ALOGV("mHasUnsupportedPlaneAlpha = %d", mHasUnsupportedPlaneAlpha);
-    ALOGV("mDisplay.hasColorTransform() = %d", mDisplay.hasColorTransform());
-    ALOGV("mHasUnsupportedBackgroundColor = %d", mHasUnsupportedBackgroundColor);
-
     if (mHasUnsupportedPlaneAlpha || mDisplay.hasColorTransform() ||
-        mHasUnsupportedBackgroundColor) {
+            hasUnsupportedBackgroundColor()) {
         hwc1Layer.compositionType = HWC_FRAMEBUFFER;
         hwc1Layer.flags = HWC_SKIP_LAYER;
         return;
     }
 
-    if (applyAllState || mCompositionType.isDirty()) {
-        hwc1Layer.flags = 0;
-        switch (mCompositionType.getPendingValue()) {
-            case Composition::Client:
+    hwc1Layer.flags = 0;
+    switch (mCompositionType) {
+        case Composition::Client:
+            hwc1Layer.compositionType = HWC_FRAMEBUFFER;
+            hwc1Layer.flags |= HWC_SKIP_LAYER;
+            break;
+        case Composition::Device:
+            hwc1Layer.compositionType = HWC_FRAMEBUFFER;
+            break;
+        case Composition::SolidColor:
+            // In theory the following line should work, but since the HWC1
+            // version of SurfaceFlinger never used HWC_BACKGROUND, HWC1
+            // devices may not work correctly. To be on the safe side, we
+            // fall back to client composition.
+            //
+            // hwc1Layer.compositionType = HWC_BACKGROUND;
+            hwc1Layer.compositionType = HWC_FRAMEBUFFER;
+            hwc1Layer.flags |= HWC_SKIP_LAYER;
+            break;
+        case Composition::Cursor:
+            hwc1Layer.compositionType = HWC_FRAMEBUFFER;
+            if (mDisplay.getDevice().getHwc1MinorVersion() >= 4) {
+                hwc1Layer.hints |= HWC_IS_CURSOR_LAYER;
+            }
+            break;
+        case Composition::Sideband:
+            if (mDisplay.getDevice().getHwc1MinorVersion() < 4) {
+                hwc1Layer.compositionType = HWC_SIDEBAND;
+            } else {
                 hwc1Layer.compositionType = HWC_FRAMEBUFFER;
                 hwc1Layer.flags |= HWC_SKIP_LAYER;
-                break;
-            case Composition::Device:
-                hwc1Layer.compositionType = HWC_FRAMEBUFFER;
-                break;
-            case Composition::SolidColor:
-                // In theory the following line should work, but since the HWC1
-                // version of SurfaceFlinger never used HWC_BACKGROUND, HWC1
-                // devices may not work correctly. To be on the safe side, we
-                // fall back to client composition.
-                //
-                // hwc1Layer.compositionType = HWC_BACKGROUND;
-                hwc1Layer.compositionType = HWC_FRAMEBUFFER;
-                hwc1Layer.flags |= HWC_SKIP_LAYER;
-                break;
-            case Composition::Cursor:
-                hwc1Layer.compositionType = HWC_FRAMEBUFFER;
-                if (mDisplay.getDevice().getHwc1MinorVersion() >= 4) {
-                    hwc1Layer.hints |= HWC_IS_CURSOR_LAYER;
-                }
-                break;
-            case Composition::Sideband:
-                if (mDisplay.getDevice().getHwc1MinorVersion() < 4) {
-                    hwc1Layer.compositionType = HWC_SIDEBAND;
-                } else {
-                    hwc1Layer.compositionType = HWC_FRAMEBUFFER;
-                    hwc1Layer.flags |= HWC_SKIP_LAYER;
-                }
-                break;
-            default:
-                hwc1Layer.compositionType = HWC_FRAMEBUFFER;
-                hwc1Layer.flags |= HWC_SKIP_LAYER;
-                break;
-        }
-        ALOGV("Layer %" PRIu64 " %s set to %d", mId,
-                to_string(mCompositionType.getPendingValue()).c_str(),
-                hwc1Layer.compositionType);
-        ALOGV_IF(hwc1Layer.flags & HWC_SKIP_LAYER, "    and skipping");
-        mCompositionType.latch();
+            }
+            break;
+        default:
+            hwc1Layer.compositionType = HWC_FRAMEBUFFER;
+            hwc1Layer.flags |= HWC_SKIP_LAYER;
+            break;
     }
+    ALOGV("Layer %" PRIu64 " %s set to %d", mId,
+            to_string(mCompositionType).c_str(),
+            hwc1Layer.compositionType);
+    ALOGV_IF(hwc1Layer.flags & HWC_SKIP_LAYER, "    and skipping");
 }
 
 // Adapter helpers
 
-void HWC2On1Adapter::populateCapabilities()
-{
+void HWC2On1Adapter::populateCapabilities() {
     ALOGV("populateCapabilities");
     if (mHwc1MinorVersion >= 3U) {
         int supportedTypes = 0;
@@ -2412,8 +2234,7 @@
     }
 }
 
-HWC2On1Adapter::Display* HWC2On1Adapter::getDisplay(hwc2_display_t id)
-{
+HWC2On1Adapter::Display* HWC2On1Adapter::getDisplay(hwc2_display_t id) {
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
     auto display = mDisplays.find(id);
@@ -2425,8 +2246,7 @@
 }
 
 std::tuple<HWC2On1Adapter::Layer*, Error> HWC2On1Adapter::getLayer(
-        hwc2_display_t displayId, hwc2_layer_t layerId)
-{
+        hwc2_display_t displayId, hwc2_layer_t layerId) {
     auto display = getDisplay(displayId);
     if (!display) {
         return std::make_tuple(static_cast<Layer*>(nullptr), Error::BadDisplay);
@@ -2444,22 +2264,19 @@
     return std::make_tuple(layer.get(), Error::None);
 }
 
-void HWC2On1Adapter::populatePrimary()
-{
+void HWC2On1Adapter::populatePrimary() {
     ALOGV("populatePrimary");
 
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
-    auto display =
-            std::make_shared<Display>(*this, HWC2::DisplayType::Physical);
+    auto display = std::make_shared<Display>(*this, HWC2::DisplayType::Physical);
     mHwc1DisplayMap[HWC_DISPLAY_PRIMARY] = display->getId();
     display->setHwc1Id(HWC_DISPLAY_PRIMARY);
     display->populateConfigs();
     mDisplays.emplace(display->getId(), std::move(display));
 }
 
-bool HWC2On1Adapter::prepareAllDisplays()
-{
+bool HWC2On1Adapter::prepareAllDisplays() {
     ATRACE_CALL();
 
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
@@ -2476,24 +2293,23 @@
         return false;
     }
 
+    // Build an array of hwc_display_contents_1 to call prepare() on HWC1.
+    mHwc1Contents.clear();
+
     // Always push the primary display
-    std::vector<HWC2On1Adapter::Display::HWC1Contents> requestedContents;
     auto primaryDisplayId = mHwc1DisplayMap[HWC_DISPLAY_PRIMARY];
     auto& primaryDisplay = mDisplays[primaryDisplayId];
-    auto primaryDisplayContents = primaryDisplay->cloneRequestedContents();
-    requestedContents.push_back(std::move(primaryDisplayContents));
+    mHwc1Contents.push_back(primaryDisplay->getDisplayContents());
 
     // Push the external display, if present
     if (mHwc1DisplayMap.count(HWC_DISPLAY_EXTERNAL) != 0) {
         auto externalDisplayId = mHwc1DisplayMap[HWC_DISPLAY_EXTERNAL];
         auto& externalDisplay = mDisplays[externalDisplayId];
-        auto externalDisplayContents =
-                externalDisplay->cloneRequestedContents();
-        requestedContents.push_back(std::move(externalDisplayContents));
+        mHwc1Contents.push_back(externalDisplay->getDisplayContents());
     } else {
         // Even if an external display isn't present, we still need to send
         // at least two displays down to HWC1
-        requestedContents.push_back(nullptr);
+        mHwc1Contents.push_back(nullptr);
     }
 
     // Push the hardware virtual display, if supported and present
@@ -2501,17 +2317,13 @@
         if (mHwc1DisplayMap.count(HWC_DISPLAY_VIRTUAL) != 0) {
             auto virtualDisplayId = mHwc1DisplayMap[HWC_DISPLAY_VIRTUAL];
             auto& virtualDisplay = mDisplays[virtualDisplayId];
-            auto virtualDisplayContents =
-                    virtualDisplay->cloneRequestedContents();
-            requestedContents.push_back(std::move(virtualDisplayContents));
+            mHwc1Contents.push_back(virtualDisplay->getDisplayContents());
         } else {
-            requestedContents.push_back(nullptr);
+            mHwc1Contents.push_back(nullptr);
         }
     }
 
-    mHwc1Contents.clear();
-    for (auto& displayContents : requestedContents) {
-        mHwc1Contents.push_back(displayContents.get());
+    for (auto& displayContents : mHwc1Contents) {
         if (!displayContents) {
             continue;
         }
@@ -2549,14 +2361,13 @@
 
         auto displayId = mHwc1DisplayMap[hwc1Id];
         auto& display = mDisplays[displayId];
-        display->setReceivedContents(std::move(requestedContents[hwc1Id]));
+        display->generateChanges();
     }
 
     return true;
 }
 
-Error HWC2On1Adapter::setAllDisplays()
-{
+Error HWC2On1Adapter::setAllDisplays() {
     ATRACE_CALL();
 
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
@@ -2602,14 +2413,13 @@
     return Error::None;
 }
 
-void HWC2On1Adapter::hwc1Invalidate()
-{
+void HWC2On1Adapter::hwc1Invalidate() {
     ALOGV("Received hwc1Invalidate");
 
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
     // If the HWC2-side callback hasn't been registered yet, buffer this until
-    // it is registered
+    // it is registered.
     if (mCallbacks.count(Callback::Refresh) == 0) {
         mHasPendingInvalidate = true;
         return;
@@ -2621,7 +2431,7 @@
         displays.emplace_back(displayPair.first);
     }
 
-    // Call back without the state lock held
+    // Call back without the state lock held.
     lock.unlock();
 
     auto refresh = reinterpret_cast<HWC2_PFN_REFRESH>(callbackInfo.pointer);
@@ -2630,14 +2440,13 @@
     }
 }
 
-void HWC2On1Adapter::hwc1Vsync(int hwc1DisplayId, int64_t timestamp)
-{
+void HWC2On1Adapter::hwc1Vsync(int hwc1DisplayId, int64_t timestamp) {
     ALOGV("Received hwc1Vsync(%d, %" PRId64 ")", hwc1DisplayId, timestamp);
 
     std::unique_lock<std::recursive_timed_mutex> lock(mStateMutex);
 
     // If the HWC2-side callback hasn't been registered yet, buffer this until
-    // it is registered
+    // it is registered.
     if (mCallbacks.count(Callback::Vsync) == 0) {
         mPendingVsyncs.emplace_back(hwc1DisplayId, timestamp);
         return;
@@ -2651,15 +2460,14 @@
     const auto& callbackInfo = mCallbacks[Callback::Vsync];
     auto displayId = mHwc1DisplayMap[hwc1DisplayId];
 
-    // Call back without the state lock held
+    // Call back without the state lock held.
     lock.unlock();
 
     auto vsync = reinterpret_cast<HWC2_PFN_VSYNC>(callbackInfo.pointer);
     vsync(callbackInfo.data, displayId, timestamp);
 }
 
-void HWC2On1Adapter::hwc1Hotplug(int hwc1DisplayId, int connected)
-{
+void HWC2On1Adapter::hwc1Hotplug(int hwc1DisplayId, int connected) {
     ALOGV("Received hwc1Hotplug(%d, %d)", hwc1DisplayId, connected);
 
     if (hwc1DisplayId != HWC_DISPLAY_EXTERNAL) {
@@ -2714,5 +2522,4 @@
             HWC2::Connection::Disconnected : HWC2::Connection::Connected;
     hotplug(callbackInfo.data, displayId, static_cast<int32_t>(hwc2Connected));
 }
-
 } // namespace android
diff --git a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
index df33ec3..408bc41 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.h
@@ -134,11 +134,6 @@
                     const std::shared_ptr<Layer>& rhs);
     };
 
-    class DisplayContentsDeleter {
-        public:
-            void operator()(struct hwc_display_contents_1* contents);
-    };
-
     // The semantics of the fences returned by the device differ between
     // hwc1.set() and hwc2.present(). Read hwcomposer.h and hwcomposer2.h
     // for more information.
@@ -193,9 +188,6 @@
 
     class Display {
         public:
-            typedef std::unique_ptr<hwc_display_contents_1,
-                    DisplayContentsDeleter> HWC1Contents;
-
             Display(HWC2On1Adapter& device, HWC2::DisplayType type);
 
             hwc2_display_t getId() const { return mId; }
@@ -206,10 +198,6 @@
             void setHwc1Id(int32_t id) { mHwc1Id = id; }
             int32_t getHwc1Id() const { return mHwc1Id; }
 
-            void incDirty() { ++mDirtyCount; }
-            void decDirty() { --mDirtyCount; }
-            bool isDirty() const { return mDirtyCount > 0 || mZIsDirty; }
-
             // HWC2 Display functions
             HWC2::Error acceptChanges();
             HWC2::Error createLayer(hwc2_layer_t* outLayerId);
@@ -233,7 +221,14 @@
                     uint32_t* outNumElements, hwc2_layer_t* outLayers,
                     int32_t* outLayerRequests);
             HWC2::Error getType(int32_t* outType);
+
+            // Since HWC1 "presents" (called "set" in HWC1) all Displays
+            // at once, the first call to any Display::present will trigger
+            // present() on all Displays in the Device. Subsequent calls without
+            // first calling validate() are a no-op (except for duping/returning
+            // the retire fence).
             HWC2::Error present(int32_t* outRetireFence);
+
             HWC2::Error setActiveConfig(hwc2_config_t configId);
             HWC2::Error setClientTarget(buffer_handle_t target,
                     int32_t acquireFence, int32_t dataspace,
@@ -244,6 +239,10 @@
                     int32_t releaseFence);
             HWC2::Error setPowerMode(HWC2::PowerMode mode);
             HWC2::Error setVsyncEnabled(HWC2::Vsync enabled);
+
+            // Since HWC1 "validates" (called "prepare" in HWC1) all Displays
+            // at once, the first call to any Display::validate() will trigger
+            // validate() on all other Displays in the Device.
             HWC2::Error validate(uint32_t* outNumTypes,
                     uint32_t* outNumRequests);
 
@@ -256,10 +255,9 @@
             void populateConfigs(uint32_t width, uint32_t height);
 
             bool prepare();
-            HWC1Contents cloneRequestedContents() const;
 
             // Called after hwc.prepare() with responses from the device.
-            void setReceivedContents(HWC1Contents contents);
+            void generateChanges();
 
             bool hasChanges() const;
             HWC2::Error set(hwc_display_contents_1& hwcContents);
@@ -270,6 +268,13 @@
 
             std::string dump() const;
 
+            // Returns numRects rects from the pool allocated during validate().
+            hwc_rect_t* GetRects(size_t numRects);
+
+            hwc_display_contents_1* getDisplayContents();
+
+            void markGeometryChanged() { mGeometryChanged = true; }
+            void resetGeometryMarker() { mGeometryChanged = false; }
         private:
             class Config {
                 public:
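
The GetRects() comment above describes handing out rects from a pool sized during validate(). Below is a minimal illustrative pool with invented names (RectPool, Rect); in the adapter itself the pool lives inside the single HWC1 contents allocation rather than in a separate std::vector.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Rect { int left, top, right, bottom; };

    // One block of rects is reserved up front; getRects() hands out
    // consecutive slices of it, so per-layer visible-region and
    // surface-damage arrays need no further allocation.
    class RectPool {
    public:
        void reset(std::size_t capacity) {
            mPool.assign(capacity, Rect{});
            mNextAvailable = mPool.data();
            mNumAvailable = capacity;
        }
        Rect* getRects(std::size_t numRects) {
            assert(numRects <= mNumAvailable && "pool sized too small");
            Rect* out = mNextAvailable;
            mNextAvailable += numRects;
            mNumAvailable -= numRects;
            return out;
        }
    private:
        std::vector<Rect> mPool;
        Rect* mNextAvailable = nullptr;
        std::size_t mNumAvailable = 0;
    };

    int main() {
        RectPool pool;
        pool.reset(8);                          // sized while validating
        Rect* visible = pool.getRects(3);       // a layer's visible region
        Rect* damage = pool.getRects(2);        // its surface damage
        visible[0] = {0, 0, 100, 100};
        damage[0] = {0, 0, 50, 50};
        return 0;
    }
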
@@ -314,7 +319,7 @@
                     std::unordered_map<android_color_mode_t, uint32_t> mHwc1Ids;
             };
 
-            // Store changes requested from the device upon calling prepare().
+            // Stores changes requested from the device upon calling prepare().
             // Handles change request to:
             //   - Layer composition type.
             //   - Layer hints.
@@ -363,7 +368,9 @@
             void populateColorModes();
             void initializeActiveConfig();
 
-            void reallocateHwc1Contents();
+            // Creates a bi-directional mapping between an index in the HWC1
+            // prepare/set array and its Layer object. Stores the mapping in
+            // mHwc1LayerMap and also updates the Layer's mHwc1Id attribute.
             void assignHwc1LayerIds();
 
             // Called after a response to prepare() has been received:
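
A small sketch of the bi-directional index/Layer mapping that the assignHwc1LayerIds() comment above describes, using invented ToyLayer/ToyDisplay types; it only illustrates the forward (Layer to index) and reverse (index to Layer) bookkeeping, not the adapter's real data structures.

    #include <cstddef>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct ToyLayer {
        std::size_t hwc1Id = 0;   // index into the HWC1 layer array
    };

    struct ToyDisplay {
        std::vector<std::shared_ptr<ToyLayer>> layers;   // kept sorted by Z
        std::unordered_map<std::size_t, std::shared_ptr<ToyLayer>> hwc1LayerMap;

        // Walk the layers in order and record both directions of the mapping:
        // Layer -> index (stored on the layer) and index -> Layer (stored here).
        void assignHwc1LayerIds() {
            hwc1LayerMap.clear();
            std::size_t nextId = 0;
            for (const auto& layer : layers) {
                layer->hwc1Id = nextId;
                hwc1LayerMap[nextId] = layer;
                ++nextId;
            }
        }
    };

    int main() {
        ToyDisplay display;
        display.layers = {std::make_shared<ToyLayer>(), std::make_shared<ToyLayer>()};
        display.assignHwc1LayerIds();
        return display.hwc1LayerMap.at(1)->hwc1Id == 1 ? 0 : 1;
    }
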
@@ -376,13 +383,16 @@
             void updateLayerRequests(const struct hwc_layer_1& hwc1Layer,
                     const Layer& layer);
 
+            // Sets all fields in the HWC1 communication array for the layer
+            // containing the HWC_FRAMEBUFFER_TARGET (always the last layer).
             void prepareFramebufferTarget();
 
+            // Display ID generator.
             static std::atomic<hwc2_display_t> sNextId;
             const hwc2_display_t mId;
-            HWC2On1Adapter& mDevice;
 
-            std::atomic<size_t> mDirtyCount;
+
+            HWC2On1Adapter& mDevice;
 
             // The state of this display should only be modified from
             // SurfaceFlinger's main loop, with the exception of when dump is
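
To make the prepareFramebufferTarget() comment above concrete, here is a toy version that fills the last slot of an HWC1-style layer list with the client target and a full-display frame. The struct fields are simplified stand-ins, not the real hwc_layer_1 layout, and the fill logic is illustrative only.

    #include <vector>

    struct ToyRect { int left, top, right, bottom; };

    struct ToyHwc1Layer {
        bool isFramebufferTarget = false;   // stand-in for the composition type
        const void* buffer = nullptr;       // stand-in for buffer_handle_t
        ToyRect displayFrame{};
        ToyRect sourceCrop{};
    };

    // The framebuffer target always occupies the last slot of the layer list;
    // fill it with the client-target buffer and a full-display frame.
    void prepareFramebufferTarget(std::vector<ToyHwc1Layer>& hwc1Layers,
                                  const void* clientTarget,
                                  int width, int height) {
        ToyHwc1Layer& target = hwc1Layers.back();
        target.isFramebufferTarget = true;
        target.buffer = clientTarget;
        target.displayFrame = {0, 0, width, height};
        target.sourceCrop = {0, 0, width, height};
    }

    int main() {
        std::vector<ToyHwc1Layer> layers(3);   // two app layers + target slot
        int fakeClientTarget = 0;
        prepareFramebufferTarget(layers, &fakeClientTarget, 1080, 1920);
        return layers.back().isFramebufferTarget ? 0 : 1;
    }
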
@@ -395,15 +405,18 @@
             // which require locking.
             mutable std::recursive_mutex mStateMutex;
 
-            bool mZIsDirty;
+            // Allocates a buffer large enough to hold all layers and rects
+            // used for communication with HWC1 and stores it in
+            // mHwc1RequestedContents.
+            void allocateRequestedContents();
 
             // Array of structs exchanged between client and hwc1 device.
-            HWC1Contents mHwc1RequestedContents; // Sent to device upon calling prepare().
-            HWC1Contents mHwc1ReceivedContents;  // Returned by device after prepare().
-
+            // Sent to device upon calling prepare().
+            std::unique_ptr<hwc_display_contents_1> mHwc1RequestedContents;
+    private:
             DeferredFence mRetireFence;
 
-            // Will only be non-null after the layer has been validated but
+            // Will only be non-null after the Display has been validated and
             // before it has been presented
             std::unique_ptr<Changes> mChanges;
 
@@ -418,15 +431,34 @@
             HWC2::PowerMode mPowerMode;
             HWC2::Vsync mVsyncEnabled;
 
+            // Used to populate HWC1 HWC_FRAMEBUFFER_TARGET layer
             FencedBuffer mClientTarget;
+
+
             FencedBuffer mOutputBuffer;
 
             bool mHasColorTransform;
 
+            // All layers this Display is aware of.
             std::multiset<std::shared_ptr<Layer>, SortLayersByZ> mLayers;
+
+            // Mapping between a layer's index in the hwc_display_contents_1
+            // array passed to HWC1 during validate/set and its Layer object.
             std::unordered_map<size_t, std::shared_ptr<Layer>> mHwc1LayerMap;
+
+            // All communication with HWC1 via prepare/set is done with one
+            // allocation; these track its pool of hwc_rect_t.
+            size_t mNumAvailableRects;
+            hwc_rect_t* mNextAvailableRect;
+
+            // True if any of the Layers contained in this Display have been
+            // updated with anything other than a buffer since the last call
+            // to Display::set().
+            bool mGeometryChanged;
     };
 
+    // Utility template that looks up the Display identified by the
+    // hwc2_display_t displayId parameter and calls the given method on it.
     template <typename ...Args>
     static int32_t callDisplayFunction(hwc2_device_t* device,
             hwc2_display_t displayId, HWC2::Error (Display::*member)(Args...),
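
A standalone sketch of the callDisplayFunction() pattern described above: resolve the Display by ID, then invoke the requested member function on it. The types, the error enum, and setPowerMode() here are invented stand-ins for the HWC2 equivalents, so this shows only the dispatch shape.

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>

    enum class Error : std::int32_t { None = 0, BadDisplay = 2 };

    struct ToyDisplay {
        Error setPowerMode(int mode) { (void)mode; return Error::None; }
    };

    struct ToyAdapter {
        std::map<std::uint64_t, std::shared_ptr<ToyDisplay>> displays;

        // Resolve the display ID, then forward the call to the member function.
        template <typename... Args>
        std::int32_t callDisplayFunction(std::uint64_t displayId,
                                         Error (ToyDisplay::*member)(Args...),
                                         Args... args) {
            auto it = displays.find(displayId);
            if (it == displays.end()) {
                return static_cast<std::int32_t>(Error::BadDisplay);
            }
            ToyDisplay& display = *it->second;
            return static_cast<std::int32_t>(
                    (display.*member)(std::forward<Args>(args)...));
        }
    };

    int main() {
        ToyAdapter adapter;
        adapter.displays[1] = std::make_shared<ToyDisplay>();
        // A hook function would typically pass its raw arguments straight through.
        return adapter.callDisplayFunction(1, &ToyDisplay::setPowerMode, 2);
    }
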
@@ -468,7 +500,8 @@
     static int32_t setColorModeHook(hwc2_device_t* device,
             hwc2_display_t display, int32_t /*android_color_mode_t*/ intMode) {
         auto mode = static_cast<android_color_mode_t>(intMode);
-        return callDisplayFunction(device, display, &Display::setColorMode, mode);
+        return callDisplayFunction(device, display, &Display::setColorMode,
+                mode);
     }
 
     static int32_t setPowerModeHook(hwc2_device_t* device,
@@ -485,46 +518,6 @@
                 enabled);
     }
 
-    // Layer functions
-
-    template <typename T>
-    class LatchedState {
-        public:
-            LatchedState(Layer& parent, T initialValue)
-              : mParent(parent),
-                mPendingValue(initialValue),
-                mValue(initialValue) {}
-
-            void setPending(T value) {
-                if (value == mPendingValue) {
-                    return;
-                }
-                if (mPendingValue == mValue) {
-                    mParent.incDirty();
-                } else if (value == mValue) {
-                    mParent.decDirty();
-                }
-                mPendingValue = value;
-            }
-
-            T getValue() const { return mValue; }
-            T getPendingValue() const { return mPendingValue; }
-
-            bool isDirty() const { return mPendingValue != mValue; }
-
-            void latch() {
-                if (isDirty()) {
-                    mValue = mPendingValue;
-                    mParent.decDirty();
-                }
-            }
-
-        private:
-            Layer& mParent;
-            T mPendingValue;
-            T mValue;
-    };
-
     class Layer {
         public:
             explicit Layer(Display& display);
@@ -535,10 +528,6 @@
             hwc2_layer_t getId() const { return mId; }
             Display& getDisplay() const { return mDisplay; }
 
-            void incDirty() { if (mDirtyCount++ == 0) mDisplay.incDirty(); }
-            void decDirty() { if (--mDirtyCount == 0) mDisplay.decDirty(); }
-            bool isDirty() const { return mDirtyCount > 0; }
-
             // HWC2 Layer functions
             HWC2::Error setBuffer(buffer_handle_t buffer, int32_t acquireFence);
             HWC2::Error setCursorPosition(int32_t x, int32_t y);
@@ -558,7 +547,7 @@
             HWC2::Error setZ(uint32_t z);
 
             HWC2::Composition getCompositionType() const {
-                return mCompositionType.getValue();
+                return mCompositionType;
             }
             uint32_t getZ() const { return mZ; }
 
@@ -568,47 +557,57 @@
             void setHwc1Id(size_t id) { mHwc1Id = id; }
             size_t getHwc1Id() const { return mHwc1Id; }
 
-            void applyState(struct hwc_layer_1& hwc1Layer, bool applyAllState);
+            // Write state to HWC1 communication struct.
+            void applyState(struct hwc_layer_1& hwc1Layer);
 
             std::string dump() const;
 
+            std::size_t getNumVisibleRegions() { return mVisibleRegion.size(); }
+
+            std::size_t getNumSurfaceDamages() { return mSurfaceDamage.size(); }
+
+            // True if the layer cannot be properly rendered by the device
+            // because it uses SolidColor (a.k.a. BackgroundColor in HWC1).
+            bool hasUnsupportedBackgroundColor() {
+                return (mCompositionType == HWC2::Composition::SolidColor &&
+                        !mDisplay.getDevice().supportsBackgroundColor());
+            }
         private:
-            void applyCommonState(struct hwc_layer_1& hwc1Layer,
-                    bool applyAllState);
-            void applySolidColorState(struct hwc_layer_1& hwc1Layer,
-                    bool applyAllState);
-            void applySidebandState(struct hwc_layer_1& hwc1Layer,
-                    bool applyAllState);
+            void applyCommonState(struct hwc_layer_1& hwc1Layer);
+            void applySolidColorState(struct hwc_layer_1& hwc1Layer);
+            void applySidebandState(struct hwc_layer_1& hwc1Layer);
             void applyBufferState(struct hwc_layer_1& hwc1Layer);
-            void applyCompositionType(struct hwc_layer_1& hwc1Layer,
-                    bool applyAllState);
+            void applyCompositionType(struct hwc_layer_1& hwc1Layer);
 
             static std::atomic<hwc2_layer_t> sNextId;
             const hwc2_layer_t mId;
             Display& mDisplay;
-            size_t mDirtyCount;
 
             FencedBuffer mBuffer;
             std::vector<hwc_rect_t> mSurfaceDamage;
 
-            LatchedState<HWC2::BlendMode> mBlendMode;
-            LatchedState<hwc_color_t> mColor;
-            LatchedState<HWC2::Composition> mCompositionType;
-            LatchedState<hwc_rect_t> mDisplayFrame;
-            LatchedState<float> mPlaneAlpha;
-            LatchedState<const native_handle_t*> mSidebandStream;
-            LatchedState<hwc_frect_t> mSourceCrop;
-            LatchedState<HWC2::Transform> mTransform;
-            LatchedState<std::vector<hwc_rect_t>> mVisibleRegion;
+            HWC2::BlendMode mBlendMode;
+            hwc_color_t mColor;
+            HWC2::Composition mCompositionType;
+            hwc_rect_t mDisplayFrame;
+            float mPlaneAlpha;
+            const native_handle_t* mSidebandStream;
+            hwc_frect_t mSourceCrop;
+            HWC2::Transform mTransform;
+            std::vector<hwc_rect_t> mVisibleRegion;
+
             uint32_t mZ;
 
             DeferredFence mReleaseFence;
 
             size_t mHwc1Id;
             bool mHasUnsupportedPlaneAlpha;
-            bool mHasUnsupportedBackgroundColor;
     };
 
+    // Utility template calling a Layer object method based on two ID
+    // parameters:
+    //   hwc2_display_t displayId
+    //   hwc2_layer_t layerId
     template <typename ...Args>
     static int32_t callLayerFunction(hwc2_device_t* device,
             hwc2_display_t displayId, hwc2_layer_t layerId,
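
The hasUnsupportedBackgroundColor() comment above flags SolidColor layers that the HWC1 device cannot compose. The sketch below shows the check plus one plausible fallback (treating such a layer as client composited); the fallback policy and all names are illustrative assumptions, not taken from the adapter.

    #include <cstdio>

    enum class Composition { Device, Client, SolidColor };

    struct ToyDevice {
        bool supportsBackgroundColor = false;   // HWC1 background-color capability
    };

    struct ToyLayer {
        Composition compositionType = Composition::Device;

        bool hasUnsupportedBackgroundColor(const ToyDevice& device) const {
            return compositionType == Composition::SolidColor &&
                   !device.supportsBackgroundColor;
        }
    };

    int main() {
        ToyDevice device;
        ToyLayer layer;
        layer.compositionType = Composition::SolidColor;

        // Illustrative fallback only: treat the layer as client composited.
        Composition effective = layer.hasUnsupportedBackgroundColor(device)
                ? Composition::Client
                : layer.compositionType;
        std::printf("composed as %d\n", static_cast<int>(effective));
        return 0;
    }
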
@@ -677,6 +676,7 @@
     std::vector<struct hwc_display_contents_1*> mHwc1Contents;
     HWC2::Error setAllDisplays();
 
+    // Callbacks
     void hwc1Invalidate();
     void hwc1Vsync(int hwc1DisplayId, int64_t timestamp);
     void hwc1Hotplug(int hwc1DisplayId, int connected);
@@ -698,6 +698,8 @@
     // callbacks or dump
 
     std::map<hwc2_layer_t, std::shared_ptr<Layer>> mLayers;
+
+    // HWC1 supports only one virtual display.
     std::shared_ptr<Display> mHwc1VirtualDisplay;
 
     // These are potentially accessed from multiple threads, and are protected
@@ -712,10 +714,19 @@
     };
     std::unordered_map<HWC2::Callback, CallbackInfo> mCallbacks;
     bool mHasPendingInvalidate;
+
+    // There is a small gap between the time the HWC1 module is started and
+    // the time the vsync and hotplug callbacks are registered with the
+    // HWC2On1Adapter. To avoid losing events, they are stored in these
+    // vectors and fed to the callbacks as soon as possible.
     std::vector<std::pair<int, int64_t>> mPendingVsyncs;
     std::vector<std::pair<int, int>> mPendingHotplugs;
 
+    // Mapping between HWC2 display IDs and Display objects.
     std::map<hwc2_display_t, std::shared_ptr<Display>> mDisplays;
+
+    // Maps an HWC1 display type (HWC_DISPLAY_PRIMARY, HWC_DISPLAY_EXTERNAL,
+    // HWC_DISPLAY_VIRTUAL) to the Display ID generated by the HWC2On1Adapter.
     std::unordered_map<int, hwc2_display_t> mHwc1DisplayMap;
 };
 
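
A sketch of the event buffering described above: vsync events (hotplugs would be handled the same way) that arrive before the client registers a callback are queued and replayed on registration. All names are invented and locking is omitted; the adapter guards this state with a mutex, as the comment above about multi-threaded access notes.

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <utility>
    #include <vector>

    class ToyAdapter {
    public:
        using VsyncCallback = std::function<void(int display, std::int64_t timestampNs)>;

        // Called by the HWC1 module; queue the event if no client is listening yet.
        void onHwc1Vsync(int display, std::int64_t timestampNs) {
            if (mVsyncCallback) {
                mVsyncCallback(display, timestampNs);
            } else {
                mPendingVsyncs.emplace_back(display, timestampNs);
            }
        }

        // Called by the HWC2 client; replay anything that arrived during the gap.
        void registerVsyncCallback(VsyncCallback callback) {
            mVsyncCallback = std::move(callback);
            for (const auto& event : mPendingVsyncs) {
                mVsyncCallback(event.first, event.second);
            }
            mPendingVsyncs.clear();
        }

    private:
        VsyncCallback mVsyncCallback;
        std::vector<std::pair<int, std::int64_t>> mPendingVsyncs;
    };

    int main() {
        ToyAdapter adapter;
        adapter.onHwc1Vsync(0, 1000);   // arrives before registration, gets queued
        adapter.registerVsyncCallback([](int display, std::int64_t t) {
            std::printf("vsync display=%d t=%lld\n", display, static_cast<long long>(t));
        });
        adapter.onHwc1Vsync(0, 2000);   // delivered immediately
        return 0;
    }
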
diff --git a/services/surfaceflinger/Effects/Daltonizer.cpp b/services/surfaceflinger/Effects/Daltonizer.cpp
index a104e8f..c953c68 100644
--- a/services/surfaceflinger/Effects/Daltonizer.cpp
+++ b/services/surfaceflinger/Effects/Daltonizer.cpp
@@ -15,7 +15,7 @@
  */
 
 #include "Daltonizer.h"
-#include <ui/mat4.h>
+#include <math/mat4.h>
 
 namespace android {
 
diff --git a/services/surfaceflinger/Effects/Daltonizer.h b/services/surfaceflinger/Effects/Daltonizer.h
index d21b155..2fb60e9 100644
--- a/services/surfaceflinger/Effects/Daltonizer.h
+++ b/services/surfaceflinger/Effects/Daltonizer.h
@@ -17,7 +17,7 @@
 #ifndef SF_EFFECTS_DALTONIZER_H_
 #define SF_EFFECTS_DALTONIZER_H_
 
-#include <ui/mat4.h>
+#include <math/mat4.h>
 
 namespace android {
 
diff --git a/services/surfaceflinger/RenderEngine/RenderEngine.h b/services/surfaceflinger/RenderEngine/RenderEngine.h
index 0259881..d19137b 100644
--- a/services/surfaceflinger/RenderEngine/RenderEngine.h
+++ b/services/surfaceflinger/RenderEngine/RenderEngine.h
@@ -23,7 +23,7 @@
 
 #include <EGL/egl.h>
 #include <EGL/eglext.h>
-#include <ui/mat4.h>
+#include <math/mat4.h>
 #include <Transform.h>
 
 #define EGL_NO_CONFIG ((EGLConfig)0)
diff --git a/services/surfaceflinger/RenderEngine/Texture.h b/services/surfaceflinger/RenderEngine/Texture.h
index 8cf85fc..a07e0c3 100644
--- a/services/surfaceflinger/RenderEngine/Texture.h
+++ b/services/surfaceflinger/RenderEngine/Texture.h
@@ -15,7 +15,7 @@
  */
 
 #include <stdint.h>
-#include <ui/mat4.h>
+#include <math/mat4.h>
 
 #ifndef SF_RENDER_ENGINE_TEXTURE_H
 #define SF_RENDER_ENGINE_TEXTURE_H
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 215628d..2781e8c 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -2539,11 +2539,18 @@
     const ssize_t index = (p != nullptr) ? p->removeChild(layer) :
         mCurrentState.layersSortedByZ.remove(layer);
 
-    if (index < 0) {
+    // As a matter of normal operation, the LayerCleaner will produce a second
+    // attempt to remove the surface. The Layer will be kept alive in mDrawingState,
+    // so we will succeed in promoting it, but it has already been removed
+    // from mCurrentState. As long as we can find it in mDrawingState we have no
+    // problem; otherwise something has gone wrong and we are leaking the layer.
+    if (index < 0 && mDrawingState.layersSortedByZ.indexOf(layer) < 0) {
         ALOGE("Failed to find layer (%s) in layer parent (%s).",
                 layer->getName().string(),
                 (p != nullptr) ? p->getName().string() : "no-parent");
         return BAD_VALUE;
+    } else if (index < 0) {
+        return NO_ERROR;
     }
 
     mLayersPendingRemoval.add(layer);
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index f52bd2d..c43786a 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -40,7 +40,7 @@
 
 #include <ui/FenceTime.h>
 #include <ui/PixelFormat.h>
-#include <ui/mat4.h>
+#include <math/mat4.h>
 
 #include <gui/FrameTimestamps.h>
 #include <gui/ISurfaceComposer.h>
diff --git a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
index e6ab29a..5aaaab1 100644
--- a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
+++ b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp
@@ -2325,11 +2325,18 @@
     const ssize_t index = (p != nullptr) ? p->removeChild(layer) :
              mCurrentState.layersSortedByZ.remove(layer);
 
-    if (index < 0) {
+    // As a matter of normal operation, the LayerCleaner will produce a second
+    // attempt to remove the surface. The Layer will be kept alive in mDrawingState,
+    // so we will succeed in promoting it, but it has already been removed
+    // from mCurrentState. As long as we can find it in mDrawingState we have no
+    // problem; otherwise something has gone wrong and we are leaking the layer.
+    if (index < 0 && mDrawingState.layersSortedByZ.indexOf(layer) < 0) {
         ALOGE("Failed to find layer (%s) in layer parent (%s).",
                 layer->getName().string(),
                 (p != nullptr) ? p->getName().string() : "no-parent");
         return BAD_VALUE;
+    } else if (index < 0) {
+        return NO_ERROR;
     }
 
     mLayersPendingRemoval.add(layer);
diff --git a/services/surfaceflinger/Transform.h b/services/surfaceflinger/Transform.h
index 66463a0..6640a13 100644
--- a/services/surfaceflinger/Transform.h
+++ b/services/surfaceflinger/Transform.h
@@ -22,8 +22,8 @@
 
 #include <ui/Point.h>
 #include <ui/Rect.h>
-#include <ui/vec2.h>
-#include <ui/vec3.h>
+#include <math/vec2.h>
+#include <math/vec3.h>
 
 #include <hardware/hardware.h>
 
diff --git a/services/surfaceflinger/main_surfaceflinger.cpp b/services/surfaceflinger/main_surfaceflinger.cpp
index 53a63bd..f151087 100644
--- a/services/surfaceflinger/main_surfaceflinger.cpp
+++ b/services/surfaceflinger/main_surfaceflinger.cpp
@@ -45,12 +45,10 @@
 
     set_sched_policy(0, SP_FOREGROUND);
 
-#ifdef ENABLE_CPUSETS
     // Put most SurfaceFlinger threads in the system-background cpuset
     // Keeps us from unnecessarily using big cores
     // Do this after the binder thread pool init
-    set_cpuset_policy(0, SP_SYSTEM);
-#endif
+    if (cpusets_enabled()) set_cpuset_policy(0, SP_SYSTEM);
 
     // initialize before clients can connect
     flinger->init();
diff --git a/services/vr/sensord/pose_service.cpp b/services/vr/sensord/pose_service.cpp
index 34bcccf..40eb21d 100644
--- a/services/vr/sensord/pose_service.cpp
+++ b/services/vr/sensord/pose_service.cpp
@@ -20,9 +20,7 @@
 #include <pdx/default_transport/service_endpoint.h>
 #include <private/dvr/benchmark.h>
 #include <private/dvr/clock_ns.h>
-#include <private/dvr/linear_pose_predictor.h>
 #include <private/dvr/platform_defines.h>
-#include <private/dvr/polynomial_pose_predictor.h>
 #include <private/dvr/pose-ipc.h>
 #include <private/dvr/sensor_constants.h>
 #include <utils/Trace.h>
@@ -234,9 +232,11 @@
 
   switch (property_get_int32(kPredictorTypeProp, 0)) {
     case 1:
-      pose_predictor_ = std::make_unique<QuadricPosePredictor>();
+      pose_predictor_ = posepredictor::Predictor::Create(
+          posepredictor::PredictorType::Quadric); break;
     default:
-      pose_predictor_ = std::make_unique<LinearPosePredictor>();
+      pose_predictor_ = posepredictor::Predictor::Create(
+          posepredictor::PredictorType::Linear);
   }
 
   enable_pose_recording_ = property_get_bool(kEnablePoseRecordProp, 0) == 1;
@@ -336,10 +336,8 @@
     pose_timestamp = GetSystemClockNs() - 1;
 
   // Feed the sample to the predictor
-  pose_predictor_->Add(PosePredictor::Sample{.position = start_t_head,
-                                             .orientation = start_q_head,
-                                             .time_ns = pose_timestamp},
-                       &last_known_pose_);
+  AddPredictorPose(pose_predictor_.get(), start_t_head, start_q_head,
+                   pose_timestamp, &last_known_pose_);
 
   // Store one extra value, because the application is working on the next
   // frame and expects the minimum count from that frame on.
@@ -361,9 +359,9 @@
 
     // Make a pose prediction
     if (enable_pose_prediction_) {
-      pose_predictor_->Predict(target_time,
-                               target_time + right_eye_photon_offset_ns_,
-                               mapped_pose_buffer_->ring + index);
+      PredictPose(pose_predictor_.get(), target_time,
+                  target_time + right_eye_photon_offset_ns_,
+                  mapped_pose_buffer_->ring + index);
     } else {
       mapped_pose_buffer_->ring[index] = last_known_pose_;
     }
diff --git a/services/vr/sensord/pose_service.h b/services/vr/sensord/pose_service.h
index 455f98a..899d5fb 100644
--- a/services/vr/sensord/pose_service.h
+++ b/services/vr/sensord/pose_service.h
@@ -12,7 +12,7 @@
 #include <pdx/service.h>
 #include <private/dvr/buffer_hub_client.h>
 #include <private/dvr/pose_client_internal.h>
-#include <private/dvr/pose_predictor.h>
+#include <private/dvr/dvr_pose_predictor.h>
 #include <private/dvr/ring_buffer.h>
 
 #include "sensor_fusion.h"
@@ -118,7 +118,7 @@
   bool enable_external_pose_ = false;
 
   // The predictor to extrapolate pose samples.
-  std::unique_ptr<PosePredictor> pose_predictor_;
+  std::unique_ptr<posepredictor::Predictor> pose_predictor_;
 
   // Pose ring buffer.
   std::shared_ptr<BufferProducer> ring_buffer_;
diff --git a/services/vr/vr_window_manager/composer/1.0/Android.bp b/services/vr/vr_window_manager/composer/1.0/Android.bp
index e3e47ff..5e791a7 100644
--- a/services/vr/vr_window_manager/composer/1.0/Android.bp
+++ b/services/vr/vr_window_manager/composer/1.0/Android.bp
@@ -28,20 +28,20 @@
     out: [
         "android/dvr/composer/1.0/IVrComposerClient.h",
         "android/dvr/composer/1.0/IHwVrComposerClient.h",
-        "android/dvr/composer/1.0/BnVrComposerClient.h",
-        "android/dvr/composer/1.0/BpVrComposerClient.h",
+        "android/dvr/composer/1.0/BnHwVrComposerClient.h",
+        "android/dvr/composer/1.0/BpHwVrComposerClient.h",
         "android/dvr/composer/1.0/BsVrComposerClient.h",
 
         "android/dvr/composer/1.0/IVrComposerView.h",
         "android/dvr/composer/1.0/IHwVrComposerView.h",
-        "android/dvr/composer/1.0/BnVrComposerView.h",
-        "android/dvr/composer/1.0/BpVrComposerView.h",
+        "android/dvr/composer/1.0/BnHwVrComposerView.h",
+        "android/dvr/composer/1.0/BpHwVrComposerView.h",
         "android/dvr/composer/1.0/BsVrComposerView.h",
 
         "android/dvr/composer/1.0/IVrComposerCallback.h",
         "android/dvr/composer/1.0/IHwVrComposerCallback.h",
-        "android/dvr/composer/1.0/BnVrComposerCallback.h",
-        "android/dvr/composer/1.0/BpVrComposerCallback.h",
+        "android/dvr/composer/1.0/BnHwVrComposerCallback.h",
+        "android/dvr/composer/1.0/BpHwVrComposerCallback.h",
         "android/dvr/composer/1.0/BsVrComposerCallback.h",
     ],
 }
diff --git a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
index 9642224..264ee1c 100644
--- a/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
+++ b/services/vr/vr_window_manager/composer/impl/vr_hwc.cpp
@@ -48,6 +48,7 @@
   int32_t format = 0;
 
   GraphicBufferMapper& mapper = GraphicBufferMapper::get();
+  // Need to register |handle|, otherwise we can't read its properties.
   if (mapper.registerBuffer(handle) != OK) {
     ALOGE("Failed to register buffer");
     return nullptr;
@@ -66,9 +67,15 @@
   // capability. Otherwise assume a count of 1.
   mapper.getLayerCount(handle, &layer_count);
 
+  // NOTE: Can't re-use |handle| since we don't own it.
   sp<GraphicBuffer> buffer = new GraphicBuffer(
       width, height, format, layer_count, producer_usage, consumer_usage,
       stride, native_handle_clone(handle), true);
+  // Need to register the cloned buffer, otherwise it can't be used later on.
+  if (mapper.registerBuffer(buffer.get()) != OK) {
+    ALOGE("Failed to register cloned buffer");
+    return nullptr;
+  }
 
   return buffer;
 }