Merge "libdvrgraphics: Add libarect as whole static lib to fix compile"
diff --git a/cmds/lshal/Timeout.h b/cmds/lshal/Timeout.h
index bf883c0..001c3d6 100644
--- a/cmds/lshal/Timeout.h
+++ b/cmds/lshal/Timeout.h
@@ -69,7 +69,7 @@
     auto boundFunc = std::bind(std::forward<Function>(func),
             interfaceObject.get(), std::forward<Args>(args)...);
     bool success = timeout(IPC_CALL_WAIT, [&ret, &boundFunc] {
-        ret = boundFunc();
+        ret = std::move(boundFunc());
     });
     if (!success) {
         return Status::fromStatusT(TIMED_OUT);
diff --git a/include/private/ui/RegionHelper.h b/include/private/ui/RegionHelper.h
index c7c3160..a22b2cb 100644
--- a/include/private/ui/RegionHelper.h
+++ b/include/private/ui/RegionHelper.h
@@ -27,10 +27,10 @@
 class region_operator
 {
 public:
-    typedef typename RECT::value_type TYPE;    
-    static const TYPE max_value = 0x7FFFFFF;
+    typedef typename RECT::value_type TYPE;
+    static const TYPE max_value = std::numeric_limits<TYPE>::max();
 
-    /* 
+    /*
      * Common boolean operations:
      * value is computed as 0b101 op 0b110
      *    other boolean operation are possible, simply compute
diff --git a/include/ui/ColorSpace.h b/include/ui/ColorSpace.h
index b1863df..e9260b5 100644
--- a/include/ui/ColorSpace.h
+++ b/include/ui/ColorSpace.h
@@ -212,14 +212,14 @@
         return v;
     }
 
-    const std::string mName;
+    std::string mName;
 
-    const mat3 mRGBtoXYZ;
-    const mat3 mXYZtoRGB;
+    mat3 mRGBtoXYZ;
+    mat3 mXYZtoRGB;
 
-    const transfer_function mOETF;
-    const transfer_function mEOTF;
-    const clamping_function mClamper;
+    transfer_function mOETF;
+    transfer_function mEOTF;
+    clamping_function mClamper;
 
     std::array<float2, 3> mPrimaries;
     float2 mWhitePoint;
diff --git a/libs/ui/tools/Android.bp b/libs/ui/tools/Android.bp
new file mode 100644
index 0000000..fb46c2b
--- /dev/null
+++ b/libs/ui/tools/Android.bp
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+    name: "libui_tools_default",
+    clang_cflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+}
+
+cc_binary {
+    name: "lutgen",
+    cppflags: [
+        "-Wall",
+        "-Wextra",
+        "-Werror",
+    ],
+    shared_libs: ["libui"],
+    srcs: ["lutgen.cpp"],
+}
diff --git a/libs/ui/tools/lutgen.cpp b/libs/ui/tools/lutgen.cpp
new file mode 100644
index 0000000..97b0822
--- /dev/null
+++ b/libs/ui/tools/lutgen.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <string>
+
+#include <getopt.h>
+
+#include <ui/ColorSpace.h>
+
+using namespace android;
+using namespace std;
+
+uint32_t gSize = 32;
+ColorSpace gColorSpaceSrc = ColorSpace::DisplayP3();
+ColorSpace gColorSpaceDst = ColorSpace::extendedSRGB();
+string gNameSrc = "DisplayP3";
+string gNameDst = "extendedSRGB";
+
+static void printHelp() {
+    cout << "lutgen -d SIZE -s SOURCE -t TARGET <lut file>" << endl;
+    cout << endl;
+    cout << "Generate a 3D LUT to convert between two color spaces." << endl;
+    cout << endl;
+    cout << "If <lut file> ends in .inc, data is generated without the array declaration." << endl;
+    cout << endl;
+    cout << "Options:" << endl;
+    cout << "  --help, -h" << endl;
+    cout << "    print this message" << endl;
+    cout << "  --dimension=, -d" << endl;
+    cout << "    the dimension of the 3D LUT. Example: 17 for a 17x17x17 LUT. 32 by default" << endl;
+    cout << "  --source=COLORSPACE, -s" << endl;
+    cout << "    the source color space, see below for available names. DisplayP3 by default" << endl;
+    cout << "  --target=COLORSPACE, -t" << endl;
+    cout << "    the target color space, see below for available names. extendedSRGB by default" << endl;
+    cout << endl;
+    cout << "Colorspace names:" << endl;
+    cout << "    sRGB" << endl;
+    cout << "    linearSRGB" << endl;
+    cout << "    extendedSRGB" << endl;
+    cout << "    linearExtendedSRGB" << endl;
+    cout << "    NTSC" << endl;
+    cout << "    BT709" << endl;
+    cout << "    BT2020" << endl;
+    cout << "    AdobeRGB" << endl;
+    cout << "    ProPhotoRGB" << endl;
+    cout << "    DisplayP3" << endl;
+    cout << "    DCIP3" << endl;
+    cout << "    ACES" << endl;
+    cout << "    ACEScg" << endl;
+}
+
+static const ColorSpace findColorSpace(const string& name) {
+    if (name == "linearSRGB") return ColorSpace::linearSRGB();
+    if (name == "extendedSRGB") return ColorSpace::extendedSRGB();
+    if (name == "linearExtendedSRGB") return ColorSpace::linearExtendedSRGB();
+    if (name == "NTSC") return ColorSpace::NTSC();
+    if (name == "BT709") return ColorSpace::BT709();
+    if (name == "BT2020") return ColorSpace::BT2020();
+    if (name == "AdobeRGB") return ColorSpace::AdobeRGB();
+    if (name == "ProPhotoRGB") return ColorSpace::ProPhotoRGB();
+    if (name == "DisplayP3") return ColorSpace::DisplayP3();
+    if (name == "DCIP3") return ColorSpace::DCIP3();
+    if (name == "ACES") return ColorSpace::ACES();
+    if (name == "ACEScg") return ColorSpace::ACEScg();
+    return ColorSpace::sRGB();
+}
+
+static int handleCommandLineArguments(int argc, char* argv[]) {
+    static constexpr const char* OPTSTR = "hd:s:t:";
+    static const struct option OPTIONS[] = {
+            { "help",       no_argument,       0, 'h' },
+            { "dimension",  required_argument, 0, 'd' },
+            { "source",     required_argument, 0, 's' },
+            { "target",     required_argument, 0, 't' },
+            { 0, 0, 0, 0 }  // termination of the option list
+    };
+
+    int opt;
+    int index = 0;
+
+    while ((opt = getopt_long(argc, argv, OPTSTR, OPTIONS, &index)) >= 0) {
+        string arg(optarg ? optarg : "");
+        switch (opt) {
+            default:
+            case 'h':
+                printHelp();
+                exit(0);
+                break;
+            case 'd':
+                gSize = max(2, min(stoi(arg), 256));
+                break;
+            case 's':
+                gNameSrc = arg;
+                gColorSpaceSrc = findColorSpace(arg);
+                break;
+            case 't':
+                gNameDst = arg;
+                gColorSpaceDst = findColorSpace(arg);
+                break;
+        }
+    }
+
+    return optind;
+}
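+
+// Example invocation (the output file name below is hypothetical):
+//   lutgen -d 16 -s DisplayP3 -t extendedSRGB display_p3_to_extended_srgb.inc
+// writes a 16x16x16 RGB16F LUT as raw hex data that can be #include'd.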
+
+int main(int argc, char* argv[]) {
+    int optionIndex = handleCommandLineArguments(argc, argv);
+    int numArgs = argc - optionIndex;
+
+    if (numArgs < 1) {
+        printHelp();
+        return 1;
+    }
+
+    bool isInclude = false;
+
+    string filename(argv[optionIndex]);
+    size_t index = filename.find_last_of('.');
+
+    if (index != string::npos) {
+        string extension(filename.substr(index + 1));
+        isInclude = extension == "inc";
+    }
+
+    ofstream outputStream(filename, ios::trunc);
+    if (outputStream.good()) {
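+        // Build the 3D LUT mapping between the source and target color spaces;
+        // the loops below serialize its gSize^3 RGB entries with x varying
+        // fastest (GL order).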
+        auto lut = ColorSpace::createLUT(gSize, gColorSpaceSrc, gColorSpaceDst);
+        auto data = lut.get();
+
+        outputStream << "// generated with lutgen " << filename.c_str() << endl;
+        outputStream << "// 3D LUT stored as an RGB16F texture, in GL order" << endl;
+        outputStream << "// Size is " << gSize << "x" << gSize << "x" << gSize << endl;
+
+        string src(gNameSrc);
+        string dst(gNameDst);
+
+        if (!isInclude) {
+            transform(src.begin(), src.end(), src.begin(), ::toupper);
+            transform(dst.begin(), dst.end(), dst.begin(), ::toupper);
+
+            outputStream << "const size_t LUT_" << src << "_TO_" << dst << "_SIZE = " << gSize << endl;
+            outputStream << "const uint16_t LUT_" << src << "_TO_" << dst << "[] = {";
+        } else {
+            outputStream << "// From " << src << " to " << dst << endl;
+        }
+
+        for (size_t z = 0; z < gSize; z++) {
+            for (size_t y = 0; y < gSize; y++) {
+                for (size_t x = 0; x < gSize; x++) {
+                    if (x % 4 == 0) outputStream << endl << "    ";
+
+                    half3 rgb = half3(*data++);
+
+                    const uint16_t r = rgb.r.getBits();
+                    const uint16_t g = rgb.g.getBits();
+                    const uint16_t b = rgb.b.getBits();
+
+                    outputStream << "0x" << setfill('0') << setw(4) << hex << r << ", ";
+                    outputStream << "0x" << setfill('0') << setw(4) << hex << g << ", ";
+                    outputStream << "0x" << setfill('0') << setw(4) << hex << b << ", ";
+                }
+            }
+        }
+
+        if (!isInclude) {
+            outputStream << endl << "}; // end LUT" << endl;
+        }
+
+        outputStream << endl;
+        outputStream.flush();
+        outputStream.close();
+    } else {
+        cerr << "Could not write to file: " << filename << endl;
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/libs/vr/libposepredictor/Android.mk b/libs/vr/libposepredictor/Android.mk
old mode 100644
new mode 100755
index 030f79c..761fe06
--- a/libs/vr/libposepredictor/Android.mk
+++ b/libs/vr/libposepredictor/Android.mk
@@ -15,7 +15,10 @@
 LOCAL_PATH := $(call my-dir)
 
 sourceFiles := \
+        pose_predictor.cpp \
+        buffered_predictor.cpp \
         linear_pose_predictor.cpp \
+        polynomial_pose_predictor.cpp \
 
 includeFiles := \
         $(LOCAL_PATH)/include
@@ -43,7 +46,9 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE_TAGS := optional
 LOCAL_SRC_FILES := \
+        pose_predictor_tests.cpp \
         linear_pose_predictor_tests.cpp \
+        polynomial_pose_predictor_tests.cpp \
 
 LOCAL_STATIC_LIBRARIES := libposepredictor $(staticLibraries)
 LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
diff --git a/libs/vr/libposepredictor/buffered_predictor.cpp b/libs/vr/libposepredictor/buffered_predictor.cpp
new file mode 100644
index 0000000..08fd524
--- /dev/null
+++ b/libs/vr/libposepredictor/buffered_predictor.cpp
@@ -0,0 +1,40 @@
+#include <private/dvr/buffered_predictor.h>
+
+namespace android {
+namespace dvr {
+
+BufferedPredictor::BufferedPredictor(size_t buffer_size) {
+  buffer_.resize(buffer_size);
+}
+
+void BufferedPredictor::BufferSample(const Sample& sample) {
+  const auto& prev_sample = buffer_[current_pose_index_];
+
+  // If we are updating a sample (the same time stamp), do not advance the
+  // counter.
+  if (sample.time_ns != prev_sample.time_ns) {
+    current_pose_index_ = (current_pose_index_ + 1) % buffer_.size();
+  }
+
+  buffer_[current_pose_index_] = sample;
+
+  // Make sure the subsequent orientations are the closest in quaternion space.
+  if (PrevSample(1).orientation.coeffs().dot(sample.orientation.coeffs()) < 0) {
+    // Flip the quaternion to be closest to the previous sample.
+    buffer_[current_pose_index_].orientation =
+        quatd(-sample.orientation.w(), -sample.orientation.x(),
+              -sample.orientation.y(), -sample.orientation.z());
+  }
+
+  ++num_poses_added_;
+}
+
+const PosePredictor::Sample& BufferedPredictor::PrevSample(size_t index) const {
+  // We must not request a pose too far in the past.
+  assert(index < buffer_.size());
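+  // Walk backwards from the current slot, wrapping around the ring buffer.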
+  return buffer_[(current_pose_index_ - index + buffer_.size()) %
+                 buffer_.size()];
+}
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h b/libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h
new file mode 100644
index 0000000..89d89e1
--- /dev/null
+++ b/libs/vr/libposepredictor/include/private/dvr/buffered_predictor.h
@@ -0,0 +1,42 @@
+#ifndef ANDROID_DVR_BUFFERED_PREDICTOR_H_
+#define ANDROID_DVR_BUFFERED_PREDICTOR_H_
+
+#include <vector>
+
+#include "pose_predictor.h"
+
+namespace android {
+namespace dvr {
+
+// Keeps the previous n poses around in a ring buffer.
+// The orientations are also unrolled so that a . b > 0 for two subsequent
+// quaternions a and b.
+class BufferedPredictor : public PosePredictor {
+ public:
+  BufferedPredictor(size_t buffer_size);
+  ~BufferedPredictor() = default;
+
+ protected:
+  // Add a pose sample into the buffer.
+  void BufferSample(const Sample& sample);
+
+  // Grab a previous sample.
+  // index = 0: last sample
+  // index = 1: the one before that
+  // ...
+  const Sample& PrevSample(size_t index) const;
+
+  // Where we keep the last n poses.
+  std::vector<Sample> buffer_;
+
+  // Where the last valid pose is in the buffer.
+  size_t current_pose_index_ = 0;
+
+  // The number of poses we have added.
+  size_t num_poses_added_ = 0;
+};
+
+}  // namespace dvr
+}  // namespace android
+
+#endif  // ANDROID_DVR_BUFFERED_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h
new file mode 100644
index 0000000..7abb486
--- /dev/null
+++ b/libs/vr/libposepredictor/include/private/dvr/polynomial_pose_predictor.h
@@ -0,0 +1,219 @@
+#ifndef ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
+#define ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
+
+#include <vector>
+
+#include <Eigen/Dense>
+
+#include "buffered_predictor.h"
+
+namespace android {
+namespace dvr {
+
+// Make a polynomial prediction of the form
+// y = coefficients_[0] + coefficients_[1] * t + coefficients_[2] * t^2 + ...
+// where t is time and y is the position and orientation.
+// We recompute the coefficients whenever we add a new sample using
+// training_window previous samples.
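+// For example, CubicPosePredictor below fits a cubic (degree 3) to the last
+// four samples.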
+template <size_t PolynomialDegree, size_t TrainingWindow>
+class PolynomialPosePredictor : public BufferedPredictor {
+ public:
+  PolynomialPosePredictor(double regularization = 1e-9)
+      : BufferedPredictor(TrainingWindow), regularization_(regularization) {
+    static_assert(TrainingWindow >= PolynomialDegree + 1,
+                  "Underconstrained polynomial regressor");
+  }
+
+  ~PolynomialPosePredictor() = default;
+
+  // We convert pose samples into a vector for matrix arithmetic using this
+  // mapping.
+  enum Components {
+    kPositionX = 0,
+    kPositionY,
+    kPositionZ,
+    kOrientationX,
+    kOrientationY,
+    kOrientationZ,
+    kOrientationW,
+    kNumComponents
+  };
+
+  // Add a new sample.
+  void Add(const Sample& sample, DvrPoseAsync* out_pose) override {
+    // Add the sample to the ring buffer.
+    BufferedPredictor::BufferSample(sample);
+
+    Eigen::Matrix<double, TrainingWindow, kNumComponents> values;
+
+    // Get the pose samples into matrices for fitting.
+    double t_vector[TrainingWindow];
+    for (size_t i = 0; i < TrainingWindow; ++i) {
+      const auto& prev_sample = PrevSample(i);
+
+      t_vector[i] = NsToT(prev_sample.time_ns);
+
+      // Save the values we will be fitting to at each sample time.
+      values(i, kPositionX) = prev_sample.position.x();
+      values(i, kPositionY) = prev_sample.position.y();
+      values(i, kPositionZ) = prev_sample.position.z();
+      values(i, kOrientationX) = prev_sample.orientation.x();
+      values(i, kOrientationY) = prev_sample.orientation.y();
+      values(i, kOrientationZ) = prev_sample.orientation.z();
+      values(i, kOrientationW) = prev_sample.orientation.w();
+    }
+
+    // Some transient matrices for solving for coefficient matrix.
+    Eigen::Matrix<double, PolynomialDegree + 1, PolynomialDegree + 1> M;
+    Eigen::Vector<double, PolynomialDegree + 1> d;
+    Eigen::Vector<double, PolynomialDegree + 1> p;
+
+    // Create a polynomial fit for each component.
+    for (size_t component = 0; component < kNumComponents; ++component) {
+      // Each row of A is [ 1 t t^2 ... ] evaluated at one sample time.
+      // x = [ coefficients[0] coefficients[1] ... ]'
+      // b = the observed values of this component at the sample times.
+      // We would like to solve the ridge-regularized least squares system
+      //   (A' * A + regularization * I) * x = A' * b
+      // given the samples we have in our training window.
+      //
+      // The loop below computes:
+      //   M = A' * A + regularization * I
+      //   d = A' * b
+      // so we can then solve M * x = d for the coefficients.
+
+      M.setIdentity();
+      d.setZero();
+      p[0] = 1;
+
+      // M = regularization * I
+      M = M * regularization_;
+
+      // Accumulate the poses in the training window.
+      for (size_t i = 0; i < TrainingWindow; ++i) {
+        // Compute the polynomial at this sample.
+        for (size_t j = 1; j <= PolynomialDegree; ++j) {
+          p[j] = p[j - 1] * t_vector[i];
+        }
+
+        // Accumulate the left and right hand sides.
+        M = M + p * p.transpose();
+        d = d + p * values(i, component);
+      }
+
+      // M is symmetric, positive semi-definite.
+      // Note: This is not the most accurate solver out there but is fast.
+      coefficients_.row(component) = Eigen::LLT<Eigen::MatrixXd>(M).solve(d);
+    }
+
+    // Fill out the out_pose at this sample.
+    Predict(sample.time_ns, sample.time_ns, out_pose);
+  }
+
+  // Predict using the polynomial coefficients.
+  void Predict(int64_t left_time_ns, int64_t right_time_ns,
+               DvrPoseAsync* out_pose) const override {
+    // Predict the left side.
+    const auto left = SamplePolynomial(left_time_ns);
+    out_pose->translation = {static_cast<float>(left[kPositionX]),
+                             static_cast<float>(left[kPositionY]),
+                             static_cast<float>(left[kPositionZ])};
+    out_pose->orientation = normalize(left[kOrientationX], left[kOrientationY],
+                                      left[kOrientationZ], left[kOrientationW]);
+
+    // Predict the right side.
+    const auto right = SamplePolynomial(right_time_ns);
+    out_pose->right_translation = {static_cast<float>(right[kPositionX]),
+                                   static_cast<float>(right[kPositionY]),
+                                   static_cast<float>(right[kPositionZ])};
+    out_pose->right_orientation =
+        normalize(right[kOrientationX], right[kOrientationY],
+                  right[kOrientationZ], right[kOrientationW]);
+
+    // Finite differencing to estimate the velocities.
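+    // Sample the polynomial kFiniteDifferenceNs apart, centered on the
+    // midpoint of the left and right times, and divide by that interval
+    // (a central difference).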
+    const auto a = SamplePolynomial(
+        (left_time_ns + right_time_ns - kFiniteDifferenceNs) / 2);
+    const auto b = SamplePolynomial(
+        (left_time_ns + right_time_ns + kFiniteDifferenceNs) / 2);
+
+    out_pose->velocity = {static_cast<float>((b[kPositionX] - a[kPositionX]) /
+                                             NsToSeconds(kFiniteDifferenceNs)),
+                          static_cast<float>((b[kPositionY] - a[kPositionY]) /
+                                             NsToSeconds(kFiniteDifferenceNs)),
+                          static_cast<float>((b[kPositionZ] - a[kPositionZ]) /
+                                             NsToSeconds(kFiniteDifferenceNs)),
+                          0.0f};
+
+    // Get the predicted orientations into quaternions, which are probably not
+    // quite unit.
+    const quatd a_orientation(a[kOrientationW], a[kOrientationX],
+                              a[kOrientationY], a[kOrientationZ]);
+    const quatd b_orientation(b[kOrientationW], b[kOrientationX],
+                              b[kOrientationY], b[kOrientationZ]);
+    const auto angular_velocity =
+        AngularVelocity(a_orientation.normalized(), b_orientation.normalized(),
+                        NsToSeconds(kFiniteDifferenceNs));
+
+    out_pose->angular_velocity = {static_cast<float>(angular_velocity[0]),
+                                  static_cast<float>(angular_velocity[1]),
+                                  static_cast<float>(angular_velocity[2]),
+                                  0.0f};
+    out_pose->timestamp_ns = left_time_ns;
+    out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
+    memset(out_pose->pad, 0, sizeof(out_pose->pad));
+  }
+
+ private:
+  // Take a quaternion and return a normalized version in a float32x4_t.
+  static float32x4_t normalize(double x, double y, double z, double w) {
+    const auto l = std::sqrt(x * x + y * y + z * z + w * w);
+    return {static_cast<float>(x / l), static_cast<float>(y / l),
+            static_cast<float>(z / l), static_cast<float>(w / l)};
+  }
+
+  // Evaluate the polynomial at a particular time.
+  Eigen::Vector<double, kNumComponents> SamplePolynomial(
+      int64_t time_ns) const {
+    const auto t = NsToT(time_ns);
+    Eigen::Vector<double, PolynomialDegree + 1> polynomial;
+
+    // Compute polynomial = [ 1 t t^2 ... ]
+    polynomial[0] = 1;
+    for (size_t degree = 1; degree <= PolynomialDegree; ++degree) {
+      polynomial[degree] = polynomial[degree - 1] * t;
+    }
+
+    // coefficients_ is a [ kNumComponents x (PolynomialDegree + 1) ] matrix.
+    return coefficients_ * polynomial;
+  }
+
+  // Convert a time in nanoseconds to t.
+  // We could use the raw time in seconds as t, but that would make it more
+  // difficult to tweak the regularization amount. Instead we subtract the last
+  // sample time, so the scale of the regularization constant doesn't change as
+  // a function of time.
+  double NsToT(int64_t time_ns) const {
+    return NsToSeconds(time_ns - buffer_[current_pose_index_].time_ns);
+  }
+
+  // The ridge regularization constant.
+  double regularization_;
+
+  // This is where we store the polynomial coefficients.
+  Eigen::Matrix<double, kNumComponents, PolynomialDegree + 1> coefficients_;
+};
+
+// Some common polynomial types.
+extern template class PolynomialPosePredictor<1, 2>;
+extern template class PolynomialPosePredictor<2, 3>;
+extern template class PolynomialPosePredictor<3, 4>;
+extern template class PolynomialPosePredictor<4, 5>;
+
+using QuadricPosePredictor = PolynomialPosePredictor<2, 3>;
+using CubicPosePredictor = PolynomialPosePredictor<3, 4>;
+using QuarticPosePredictor = PolynomialPosePredictor<4, 5>;
+}  // namespace dvr
+}  // namespace android
+
+#endif  // ANDROID_DVR_POLYNOMIAL_POSE_PREDICTOR_H_
diff --git a/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h b/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h
index 719edbe..d774500 100644
--- a/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h
+++ b/libs/vr/libposepredictor/include/private/dvr/pose_predictor.h
@@ -17,6 +17,9 @@
   PosePredictor() = default;
   virtual ~PosePredictor() = default;
 
+  // The nanoseconds to use for finite differencing.
+  static constexpr int64_t kFiniteDifferenceNs = 100;
+
   // Encapsulates a pose sample.
   struct Sample {
     vec3d position = vec3d::Zero();
@@ -24,6 +27,16 @@
     int64_t time_ns = 0;
   };
 
+  // Compute the angular velocity from orientation start_orientation to
+  // end_orientation in delta_time.
+  static vec3d AngularVelocity(const quatd& start_orientation,
+                               const quatd& end_orientation, double delta_time);
+
+  // Initialize the out pose from a sample.
+  static void InitializeFromSample(const Sample& sample, DvrPoseAsync* out_pose,
+                                   const vec3d& velocity,
+                                   const vec3d& angular_velocity);
+
   // Add a pose sample coming from the sensors.
   // Returns this sample as a dvr pose.
   //
diff --git a/libs/vr/libposepredictor/linear_pose_predictor.cpp b/libs/vr/libposepredictor/linear_pose_predictor.cpp
index de1b951..11db735 100644
--- a/libs/vr/libposepredictor/linear_pose_predictor.cpp
+++ b/libs/vr/libposepredictor/linear_pose_predictor.cpp
@@ -1,5 +1,3 @@
-#include <log/log.h>
-
 #include <private/dvr/linear_pose_predictor.h>
 
 namespace android {
@@ -25,22 +23,15 @@
   const auto pose_delta_time =
       NsToSeconds(sample.time_ns - previous_sample.time_ns);
 
-  const double inverse_dt = 1. / pose_delta_time;
   if (pose_delta_time > 0.0) {
-    velocity_ = (sample.position - previous_sample.position) * inverse_dt;
+    velocity_ = (sample.position - previous_sample.position) / pose_delta_time;
+    rotational_velocity_ = PosePredictor::AngularVelocity(
+        previous_sample.orientation, sample.orientation, pose_delta_time);
   } else {
     velocity_ = vec3d::Zero();
+    rotational_velocity_ = vec3d::Zero();
   }
 
-  quatd delta_q = sample.orientation.inverse() * previous_sample.orientation;
-  // Check that delta_q.w() == 1, Eigen doesn't respect this convention. If
-  // delta_q.w() == -1, we'll get the opposite velocity.
-  if (delta_q.w() < 0) {
-    delta_q.w() = -delta_q.w();
-    delta_q.vec() = -delta_q.vec();
-  }
-  rotational_velocity_ = -2.0 * delta_q.vec() * inverse_dt;
-
   // Temporary experiment with acceleration estimate.
   angular_speed_ = rotational_velocity_.norm();
   angular_accel_ = 0.0;
@@ -57,36 +48,7 @@
     rotational_axis_ = rotational_velocity_ / angular_speed_;
   }
 
-  out_pose->orientation = {static_cast<float>(sample.orientation.vec().x()),
-                           static_cast<float>(sample.orientation.vec().y()),
-                           static_cast<float>(sample.orientation.vec().z()),
-                           static_cast<float>(sample.orientation.w())};
-
-  out_pose->translation = {static_cast<float>(sample.position.x()),
-                           static_cast<float>(sample.position.y()),
-                           static_cast<float>(sample.position.z()), 0.0f};
-
-  out_pose->right_orientation = {
-      static_cast<float>(sample.orientation.vec().x()),
-      static_cast<float>(sample.orientation.vec().y()),
-      static_cast<float>(sample.orientation.vec().z()),
-      static_cast<float>(sample.orientation.w())};
-
-  out_pose->right_translation = {static_cast<float>(sample.position.x()),
-                                 static_cast<float>(sample.position.y()),
-                                 static_cast<float>(sample.position.z()), 0.0f};
-
-  out_pose->angular_velocity = {static_cast<float>(rotational_velocity_.x()),
-                                static_cast<float>(rotational_velocity_.y()),
-                                static_cast<float>(rotational_velocity_.z()),
-                                0.0f};
-
-  out_pose->velocity = {static_cast<float>(velocity_.x()),
-                        static_cast<float>(velocity_.y()),
-                        static_cast<float>(velocity_.z()), 0.0f};
-  out_pose->timestamp_ns = sample.time_ns;
-  out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
-  memset(out_pose->pad, 0, sizeof(out_pose->pad));
+  InitializeFromSample(sample, out_pose, velocity_, rotational_velocity_);
 }
 
 void LinearPosePredictor::Predict(int64_t left_time_ns, int64_t right_time_ns,
diff --git a/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp b/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp
index 6c4f58a..1f38041a 100644
--- a/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp
+++ b/libs/vr/libposepredictor/linear_pose_predictor_tests.cpp
@@ -1,5 +1,3 @@
-#include <iostream>
-
 #include <gtest/gtest.h>
 
 #include <private/dvr/linear_pose_predictor.h>
diff --git a/libs/vr/libposepredictor/polynomial_pose_predictor.cpp b/libs/vr/libposepredictor/polynomial_pose_predictor.cpp
new file mode 100644
index 0000000..47eab8a
--- /dev/null
+++ b/libs/vr/libposepredictor/polynomial_pose_predictor.cpp
@@ -0,0 +1,13 @@
+#include <private/dvr/polynomial_pose_predictor.h>
+
+namespace android {
+namespace dvr {
+
+// Instantiate the common polynomial types.
+template class PolynomialPosePredictor<1, 2>;
+template class PolynomialPosePredictor<2, 3>;
+template class PolynomialPosePredictor<3, 4>;
+template class PolynomialPosePredictor<4, 5>;
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp b/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp
new file mode 100644
index 0000000..9722182
--- /dev/null
+++ b/libs/vr/libposepredictor/polynomial_pose_predictor_tests.cpp
@@ -0,0 +1,139 @@
+#include <gtest/gtest.h>
+
+#include <private/dvr/polynomial_pose_predictor.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// For comparing expected and actual.
+constexpr double kAbsErrorTolerance = 1e-5;
+
+// Test the linear extrapolation from two samples.
+TEST(PolynomialPosePredictor, Linear) {
+  DvrPoseAsync dummy;
+
+  // Degree = 1, simple line, passing through two points.
+  // Note the regularization is 0 so we expect the exact fit.
+  PolynomialPosePredictor<1, 2> predictor(0);
+
+  // Add two samples.
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
+      &dummy);
+
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
+      &dummy);
+
+  DvrPoseAsync predicted_pose;
+
+  predictor.Predict(20, 30, &predicted_pose);
+
+  // Check the x,y,z components for the expected translation.
+  EXPECT_NEAR(predicted_pose.translation[0], 2, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.translation[1], 4, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.translation[2], 6, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[0], 3, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[1], 6, kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[2], 9, kAbsErrorTolerance);
+}
+
+// Test the degree two polynomial fit.
+TEST(PolynomialPosePredictor, Quadric) {
+  DvrPoseAsync dummy;
+
+  // Degree = 2, need three samples to fit a polynomial.
+  // Note the regularization is 0 so we expect the exact fit.
+  PolynomialPosePredictor<2, 3> predictor(0);
+
+  // Add three samples.
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
+      &dummy);
+
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
+      &dummy);
+
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 20},
+      &dummy);
+
+  // The expected polynomials for x/y/z.
+
+  // x:  0.01 * t^2 - 0.2 * t + 1
+  const auto x = [](auto t) { return 0.01 * t * t - 0.2 * t + 1; };
+
+  // y:  0.02 * t^2 - 0.4 * t + 2
+  const auto y = [](auto t) { return 0.02 * t * t - 0.4 * t + 2; };
+
+  // z:  0.03 * t^2 - 0.6 * t + 3
+  const auto z = [](auto t) { return 0.03 * t * t - 0.6 * t + 3; };
+
+  DvrPoseAsync predicted_pose;
+
+  predictor.Predict(40, 50, &predicted_pose);
+
+  // Check the x,y,z components for the expected translation.
+  EXPECT_NEAR(predicted_pose.translation[0], x(40), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.translation[1], y(40), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.translation[2], z(40), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[0], x(50), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[1], y(50), kAbsErrorTolerance);
+  EXPECT_NEAR(predicted_pose.right_translation[2], z(50), kAbsErrorTolerance);
+}
+
+// Test the degree two polynomial fit with degenerate input.
+//
+// The input samples all lie on a line, which would normally make our system
+// degenerate. We rely on the regularization term to recover the linear
+// solution with a quadric predictor.
+TEST(PolynomialPosePredictor, QuadricDegenerate) {
+  DvrPoseAsync dummy;
+
+  // Degree = 2, need three samples to fit a polynomial.
+  // Note that we are using a small non-zero regularization term here.
+  // We cannot use a 0 regularizer since the input is degenerate.
+  PolynomialPosePredictor<2, 3> predictor(1e-20);
+
+  // Add three samples.
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {0, 0, 0}, .orientation = {0, 0, 0, 1}, .time_ns = 0},
+      &dummy);
+
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {1, 2, 3}, .orientation = {0, 0, 0, 1}, .time_ns = 10},
+      &dummy);
+
+  predictor.Add(
+      PosePredictor::Sample{
+          .position = {2, 4, 6}, .orientation = {0, 0, 0, 1}, .time_ns = 20},
+      &dummy);
+
+  DvrPoseAsync predicted_pose;
+
+  predictor.Predict(30, 40, &predicted_pose);
+
+  // Check the x,y,z components for the expected translation.
+  // We are using a higher error threshold since this is now approximate.
+  EXPECT_NEAR(predicted_pose.translation[0], 3, 0.001);
+  EXPECT_NEAR(predicted_pose.translation[1], 6, 0.001);
+  EXPECT_NEAR(predicted_pose.translation[2], 9, 0.001);
+  EXPECT_NEAR(predicted_pose.right_translation[0], 4, 0.001);
+  EXPECT_NEAR(predicted_pose.right_translation[1], 8, 0.001);
+  EXPECT_NEAR(predicted_pose.right_translation[2], 12, 0.001);
+}
+
+}  // namespace
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libposepredictor/pose_predictor.cpp b/libs/vr/libposepredictor/pose_predictor.cpp
new file mode 100644
index 0000000..b09a152
--- /dev/null
+++ b/libs/vr/libposepredictor/pose_predictor.cpp
@@ -0,0 +1,50 @@
+#include <private/dvr/pose_predictor.h>
+
+namespace android {
+namespace dvr {
+
+vec3d PosePredictor::AngularVelocity(const quatd& a, const quatd& b,
+                                     double delta_time) {
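+  // For two nearby orientations, the relative rotation a^-1 * b is
+  // approximately (1, 0.5 * omega * delta_time) as a quaternion, so the
+  // angular velocity omega is roughly 2 * vec(a^-1 * b) / delta_time.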
+  const auto delta_q = a.inverse() * b;
+  // Keep the short-way-around representation: Eigen does not enforce
+  // delta_q.w() >= 0, and the negated quaternion encodes the same rotation
+  // but would flip the sign of the computed velocity.
+  return 2.0 * (delta_q.w() < 0 ? -delta_q.vec() : delta_q.vec()) / delta_time;
+}
+
+void PosePredictor::InitializeFromSample(const Sample& sample,
+                                         DvrPoseAsync* out_pose,
+                                         const vec3d& velocity,
+                                         const vec3d& angular_velocity) {
+  out_pose->orientation = {static_cast<float>(sample.orientation.vec().x()),
+                           static_cast<float>(sample.orientation.vec().y()),
+                           static_cast<float>(sample.orientation.vec().z()),
+                           static_cast<float>(sample.orientation.w())};
+
+  out_pose->translation = {static_cast<float>(sample.position.x()),
+                           static_cast<float>(sample.position.y()),
+                           static_cast<float>(sample.position.z()), 0.0f};
+
+  out_pose->right_orientation = {
+      static_cast<float>(sample.orientation.vec().x()),
+      static_cast<float>(sample.orientation.vec().y()),
+      static_cast<float>(sample.orientation.vec().z()),
+      static_cast<float>(sample.orientation.w())};
+
+  out_pose->right_translation = {static_cast<float>(sample.position.x()),
+                                 static_cast<float>(sample.position.y()),
+                                 static_cast<float>(sample.position.z()), 0.0f};
+
+  out_pose->angular_velocity = {static_cast<float>(angular_velocity.x()),
+                                static_cast<float>(angular_velocity.y()),
+                                static_cast<float>(angular_velocity.z()), 0.0f};
+
+  out_pose->velocity = {static_cast<float>(velocity.x()),
+                        static_cast<float>(velocity.y()),
+                        static_cast<float>(velocity.z()), 0.0f};
+  out_pose->timestamp_ns = sample.time_ns;
+  out_pose->flags = DVR_POSE_FLAG_HEAD | DVR_POSE_FLAG_VALID;
+  memset(out_pose->pad, 0, sizeof(out_pose->pad));
+}
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libposepredictor/pose_predictor_tests.cpp b/libs/vr/libposepredictor/pose_predictor_tests.cpp
new file mode 100644
index 0000000..1e58b11
--- /dev/null
+++ b/libs/vr/libposepredictor/pose_predictor_tests.cpp
@@ -0,0 +1,52 @@
+#include <gtest/gtest.h>
+
+#include <private/dvr/pose_predictor.h>
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// For comparing expected and actual.
+constexpr double kAbsErrorTolerance = 1e-4;
+
+// Test the angular velocity computation from two orientations.
+TEST(PosePredictor, AngularVelocity) {
+  // Some random rotation axis we will rotate around.
+  const vec3d kRotationAxis = vec3d(1, 2, 3).normalized();
+
+  // Some random angle we will be rotating by.
+  const double kRotationAngle = M_PI / 30;
+
+  // Random start orientation we are currently at.
+  const quatd kStartOrientation = quatd(5, 3, 4, 1).normalized();
+
+  // The orientation we will end up at.
+  const quatd kEndOrientation =
+      kStartOrientation *
+      quatd(Eigen::AngleAxis<double>(kRotationAngle, kRotationAxis));
+
+  // The delta time for going from start orientation to end.
+  const float kDeltaTime = 1.0;
+
+  // Compute the angular velocity from start orientation to end.
+  const auto angularVelocity = PosePredictor::AngularVelocity(
+      kStartOrientation, kEndOrientation, kDeltaTime);
+
+  // Extract the axis and the angular speed.
+  const auto angularSpeed = angularVelocity.norm();
+  const auto rotationAxis = angularVelocity.normalized();
+
+  // The speed must match.
+  EXPECT_NEAR(angularSpeed, kRotationAngle / kDeltaTime, kAbsErrorTolerance);
+
+  // The rotation axis must match.
+  EXPECT_NEAR(rotationAxis[0], kRotationAxis[0], kAbsErrorTolerance);
+  EXPECT_NEAR(rotationAxis[1], kRotationAxis[1], kAbsErrorTolerance);
+  EXPECT_NEAR(rotationAxis[2], kRotationAxis[2], kAbsErrorTolerance);
+}
+
+}  // namespace
+
+}  // namespace dvr
+}  // namespace android
diff --git a/opengl/specs/EGL_ANDROID_get_native_client_buffer.txt b/opengl/specs/EGL_ANDROID_get_native_client_buffer.txt
index de012a0..772b21a 100644
--- a/opengl/specs/EGL_ANDROID_get_native_client_buffer.txt
+++ b/opengl/specs/EGL_ANDROID_get_native_client_buffer.txt
@@ -61,7 +61,7 @@
 
    "The command
 
-        EGLClientBuffer eglGeteNativeClientBufferANDROID(
+        EGLClientBuffer eglGetNativeClientBufferANDROID(
                                 AHardwareBuffer *buffer)
 
     may be used to create an EGLClientBuffer from an AHardwareBuffer object.
@@ -73,7 +73,7 @@
 
     Errors
 
-        If eglGeteNativeClientBufferANDROID fails, NULL will be returned, no
+        If eglGetNativeClientBufferANDROID fails, NULL will be returned, no
         memory will be allocated, and the following error will be generated:
 
        * If the value of buffer is NULL, the error EGL_BAD_PARAMETER is
@@ -92,5 +92,8 @@
 
 Revision History
 
+#2 (Craig Donner, February 17, 2017)
+    - Fix typographical errors.
+
 #1 (Craig Donner, January 27, 2017)
     - Initial draft.
diff --git a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
index a6171f5..1d2c178 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2On1Adapter.cpp
@@ -2046,6 +2046,10 @@
 
 Error HWC2On1Adapter::Layer::setSurfaceDamage(hwc_region_t damage)
 {
+    // HWC1 only supports surface damage starting with version 1.5, so ignore
+    // the damage region on older versions.
+    if (mDisplay.getDevice().mHwc1MinorVersion < 5) {
+        return Error::None;
+    }
     mSurfaceDamage.resize(damage.numRects);
     std::copy_n(damage.rects, damage.numRects, mSurfaceDamage.begin());
     return Error::None;
diff --git a/services/vr/sensord/pose_service.cpp b/services/vr/sensord/pose_service.cpp
index 8e4dbba..34bcccf 100644
--- a/services/vr/sensord/pose_service.cpp
+++ b/services/vr/sensord/pose_service.cpp
@@ -20,7 +20,9 @@
 #include <pdx/default_transport/service_endpoint.h>
 #include <private/dvr/benchmark.h>
 #include <private/dvr/clock_ns.h>
+#include <private/dvr/linear_pose_predictor.h>
 #include <private/dvr/platform_defines.h>
+#include <private/dvr/polynomial_pose_predictor.h>
 #include <private/dvr/pose-ipc.h>
 #include <private/dvr/sensor_constants.h>
 #include <utils/Trace.h>
@@ -62,6 +64,7 @@
 static constexpr char kEnableSensorPlayProp[] = "dvr.enable_6dof_playback";
 static constexpr char kEnableSensorPlayIdProp[] = "dvr.6dof_playback_id";
 static constexpr char kEnablePoseRecordProp[] = "dvr.enable_pose_recording";
+static constexpr char kPredictorTypeProp[] = "dvr.predictor_type";
 
 // Persistent buffer names.
 static constexpr char kPoseRingBufferName[] = "PoseService:RingBuffer";
@@ -229,6 +232,13 @@
     }
   }
 
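+  // Select the pose predictor implementation: dvr.predictor_type == 1 uses the
+  // quadric polynomial predictor; any other value uses the linear predictor.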
+  switch (property_get_int32(kPredictorTypeProp, 0)) {
+    case 1:
+      pose_predictor_ = std::make_unique<QuadricPosePredictor>();
+      break;
+    default:
+      pose_predictor_ = std::make_unique<LinearPosePredictor>();
+      break;
+  }
+
   enable_pose_recording_ = property_get_bool(kEnablePoseRecordProp, 0) == 1;
 
   SetPoseMode(DVR_POSE_MODE_6DOF);
@@ -326,10 +336,10 @@
     pose_timestamp = GetSystemClockNs() - 1;
 
   // Feed the sample to the predictor
-  pose_predictor_.Add(PosePredictor::Sample{.position = start_t_head,
-                                            .orientation = start_q_head,
-                                            .time_ns = pose_timestamp},
-                      &last_known_pose_);
+  pose_predictor_->Add(PosePredictor::Sample{.position = start_t_head,
+                                             .orientation = start_q_head,
+                                             .time_ns = pose_timestamp},
+                       &last_known_pose_);
 
   // Store one extra value, because the application is working on the next
   // frame and expects the minimum count from that frame on.
@@ -351,9 +361,9 @@
 
     // Make a pose prediction
     if (enable_pose_prediction_) {
-      pose_predictor_.Predict(target_time,
-                              target_time + right_eye_photon_offset_ns_,
-                              mapped_pose_buffer_->ring + index);
+      pose_predictor_->Predict(target_time,
+                               target_time + right_eye_photon_offset_ns_,
+                               mapped_pose_buffer_->ring + index);
     } else {
       mapped_pose_buffer_->ring[index] = last_known_pose_;
     }
diff --git a/services/vr/sensord/pose_service.h b/services/vr/sensord/pose_service.h
index 300737c..455f98a 100644
--- a/services/vr/sensord/pose_service.h
+++ b/services/vr/sensord/pose_service.h
@@ -12,8 +12,8 @@
 #include <pdx/service.h>
 #include <private/dvr/buffer_hub_client.h>
 #include <private/dvr/pose_client_internal.h>
+#include <private/dvr/pose_predictor.h>
 #include <private/dvr/ring_buffer.h>
-#include <private/dvr/linear_pose_predictor.h>
 
 #include "sensor_fusion.h"
 #include "sensor_thread.h"
@@ -118,7 +118,7 @@
   bool enable_external_pose_ = false;
 
   // The predictor to extrapolate pose samples.
-  LinearPosePredictor pose_predictor_;
+  std::unique_ptr<PosePredictor> pose_predictor_;
 
   // Pose ring buffer.
   std::shared_ptr<BufferProducer> ring_buffer_;