Remove graphics.cpp and friends

* Delete code that isn't used in the O2 path: graphics.cpp, late_latch.cpp,
  frame_history.cpp, gl_fenced_flush.cpp, dummy_native_window.cpp,
  native_buffer_queue.cpp, and the associated tests

Bug: 36776792
Test: Compiled
Change-Id: I4adf6ec5678a53e0850229f9dda60e8687793376
diff --git a/libs/vr/libdisplay/Android.bp b/libs/vr/libdisplay/Android.bp
index 41baef8..d90521a 100644
--- a/libs/vr/libdisplay/Android.bp
+++ b/libs/vr/libdisplay/Android.bp
@@ -13,15 +13,9 @@
 // limitations under the License.
 
 sourceFiles = [
-    "native_buffer_queue.cpp",
     "display_client.cpp",
     "display_manager_client.cpp",
     "display_protocol.cpp",
-    "dummy_native_window.cpp",
-    "frame_history.cpp",
-    "gl_fenced_flush.cpp",
-    "graphics.cpp",
-    "late_latch.cpp",
     "vsync_client.cpp",
 ]
 
@@ -34,9 +28,6 @@
     "libcutils",
     "liblog",
     "libutils",
-    "libEGL",
-    "libGLESv2",
-    "libvulkan",
     "libui",
     "libgui",
     "libhardware",
@@ -45,10 +36,9 @@
 ]
 
 staticLibraries = [
-    "libbufferhub",
-    "libbufferhubqueue",
     "libdvrcommon",
-    "libdvrgraphics",
+    "libbufferhubqueue",
+    "libbufferhub",
     "libvrsensor",
     "libpdx_default_transport",
 ]
@@ -74,27 +64,3 @@
 
     name: "libdisplay",
 }
-
-graphicsAppTestFiles = ["tests/graphics_app_tests.cpp"]
-
-cc_test {
-    name: "graphics_app_tests",
-    tags: ["optional"],
-
-    srcs: graphicsAppTestFiles,
-
-    shared_libs: sharedLibraries,
-
-    static_libs: ["libdisplay"] + staticLibraries,
-}
-
-dummyNativeWindowTestFiles = ["tests/dummy_native_window_tests.cpp"]
-
-cc_test {
-    name: "dummy_native_window_tests",
-    tags: [ "optional" ],
-    srcs: dummyNativeWindowTestFiles,
-    shared_libs: sharedLibraries,
-    static_libs: [ "libdisplay" ] + staticLibraries,
-}
-
diff --git a/libs/vr/libdisplay/display_client.cpp b/libs/vr/libdisplay/display_client.cpp
index 5c9ebd4..935ca2e 100644
--- a/libs/vr/libdisplay/display_client.cpp
+++ b/libs/vr/libdisplay/display_client.cpp
@@ -9,7 +9,6 @@
 #include <mutex>
 
 #include <private/dvr/display_protocol.h>
-#include <private/dvr/late_latch.h>
 #include <private/dvr/native_buffer.h>
 
 using android::pdx::ErrorStatus;
diff --git a/libs/vr/libdisplay/dummy_native_window.cpp b/libs/vr/libdisplay/dummy_native_window.cpp
deleted file mode 100644
index 4628b8e..0000000
--- a/libs/vr/libdisplay/dummy_native_window.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-#include "include/private/dvr/dummy_native_window.h"
-
-#include <utils/Errors.h>
-
-namespace {
-// Dummy functions required for an ANativeWindow Implementation.
-int F1(struct ANativeWindow*, int) { return 0; }
-int F2(struct ANativeWindow*, struct ANativeWindowBuffer**) { return 0; }
-int F3(struct ANativeWindow*, struct ANativeWindowBuffer*) { return 0; }
-int F4(struct ANativeWindow*, struct ANativeWindowBuffer**, int*) { return 0; }
-int F5(struct ANativeWindow*, struct ANativeWindowBuffer*, int) { return 0; }
-}  // anonymous namespace
-
-namespace android {
-namespace dvr {
-
-DummyNativeWindow::DummyNativeWindow() {
-  ANativeWindow::setSwapInterval = F1;
-  ANativeWindow::dequeueBuffer = F4;
-  ANativeWindow::cancelBuffer = F5;
-  ANativeWindow::queueBuffer = F5;
-  ANativeWindow::query = Query;
-  ANativeWindow::perform = Perform;
-
-  ANativeWindow::dequeueBuffer_DEPRECATED = F2;
-  ANativeWindow::cancelBuffer_DEPRECATED = F3;
-  ANativeWindow::lockBuffer_DEPRECATED = F3;
-  ANativeWindow::queueBuffer_DEPRECATED = F3;
-}
-
-int DummyNativeWindow::Query(const ANativeWindow*, int what, int* value) {
-  switch (what) {
-    // This must be 1 in order for eglCreateWindowSurface to not trigger an
-    // error
-    case NATIVE_WINDOW_IS_VALID:
-      *value = 1;
-      return NO_ERROR;
-    case NATIVE_WINDOW_WIDTH:
-    case NATIVE_WINDOW_HEIGHT:
-    case NATIVE_WINDOW_FORMAT:
-    case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
-    case NATIVE_WINDOW_CONCRETE_TYPE:
-    case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER:
-    case NATIVE_WINDOW_DEFAULT_WIDTH:
-    case NATIVE_WINDOW_DEFAULT_HEIGHT:
-    case NATIVE_WINDOW_TRANSFORM_HINT:
-      *value = 0;
-      return NO_ERROR;
-  }
-
-  *value = 0;
-  return BAD_VALUE;
-}
-
-int DummyNativeWindow::Perform(ANativeWindow*, int operation, ...) {
-  switch (operation) {
-    case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
-    case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
-    case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
-    case NATIVE_WINDOW_SET_USAGE:
-    case NATIVE_WINDOW_CONNECT:
-    case NATIVE_WINDOW_DISCONNECT:
-    case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
-    case NATIVE_WINDOW_API_CONNECT:
-    case NATIVE_WINDOW_API_DISCONNECT:
-    case NATIVE_WINDOW_SET_BUFFER_COUNT:
-    case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
-    case NATIVE_WINDOW_SET_SCALING_MODE:
-      return NO_ERROR;
-    case NATIVE_WINDOW_LOCK:
-    case NATIVE_WINDOW_UNLOCK_AND_POST:
-    case NATIVE_WINDOW_SET_CROP:
-    case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
-      return INVALID_OPERATION;
-  }
-  return NAME_NOT_FOUND;
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libdisplay/frame_history.cpp b/libs/vr/libdisplay/frame_history.cpp
deleted file mode 100644
index 154afbe..0000000
--- a/libs/vr/libdisplay/frame_history.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-#include <private/dvr/frame_history.h>
-
-#include <errno.h>
-#include <log/log.h>
-#include <sync/sync.h>
-
-#include <pdx/file_handle.h>
-#include <private/dvr/clock_ns.h>
-#include <private/dvr/sync_util.h>
-
-using android::pdx::LocalHandle;
-
-constexpr int kNumFramesToUseForSchedulePrediction = 10;
-constexpr int kDefaultVsyncIntervalPrediction = 1;
-constexpr int kMaxVsyncIntervalPrediction = 4;
-constexpr int kDefaultPendingFrameBufferSize = 10;
-
-namespace android {
-namespace dvr {
-
-FrameHistory::PendingFrame::PendingFrame()
-    : start_ns(0), scheduled_vsync(0), scheduled_finish_ns(0) {}
-
-FrameHistory::PendingFrame::PendingFrame(int64_t start_ns,
-                                         uint32_t scheduled_vsync,
-                                         int64_t scheduled_finish_ns,
-                                         LocalHandle&& fence)
-    : start_ns(start_ns), scheduled_vsync(scheduled_vsync),
-      scheduled_finish_ns(scheduled_finish_ns), fence(std::move(fence)) {}
-
-FrameHistory::FrameHistory() : FrameHistory(kDefaultPendingFrameBufferSize) {}
-
-FrameHistory::FrameHistory(int pending_frame_buffer_size)
-    : pending_frames_(pending_frame_buffer_size),
-      finished_frames_(pending_frame_buffer_size),
-      frame_duration_history_(kNumFramesToUseForSchedulePrediction) {}
-
-void FrameHistory::Reset(int pending_frame_buffer_size) {
-  pending_frames_.Reset(pending_frame_buffer_size);
-  finished_frames_.Reset(pending_frame_buffer_size);
-  frame_duration_history_.Clear();
-}
-
-void FrameHistory::OnFrameStart(uint32_t scheduled_vsync,
-                                int64_t scheduled_finish_ns) {
-  if (!pending_frames_.IsEmpty() && !pending_frames_.Back().fence) {
-    // If we don't have a fence set for the previous frame it's because
-    // OnFrameStart() was called twice in a row with no OnFrameSubmit() call. In
-    // that case throw out the pending frame data for the last frame.
-    pending_frames_.PopBack();
-  }
-
-  if (pending_frames_.IsFull()) {
-    ALOGW("Pending frames buffer is full. Discarding pending frame data.");
-  }
-
-  pending_frames_.Append(PendingFrame(GetSystemClockNs(), scheduled_vsync,
-                                      scheduled_finish_ns, LocalHandle()));
-}
-
-void FrameHistory::OnFrameSubmit(LocalHandle&& fence) {
-  // Add the fence to the previous frame data in pending_frames so we can
-  // track when it finishes.
-  if (!pending_frames_.IsEmpty() && !pending_frames_.Back().fence) {
-    if (fence && pending_frames_.Back().scheduled_vsync != UINT32_MAX)
-      pending_frames_.Back().fence = std::move(fence);
-    else
-      pending_frames_.PopBack();
-  }
-}
-
-void FrameHistory::CheckForFinishedFrames() {
-  if (pending_frames_.IsEmpty())
-    return;
-
-  android::dvr::FenceInfoBuffer fence_info_buffer;
-  while (!pending_frames_.IsEmpty()) {
-    const auto& pending_frame = pending_frames_.Front();
-    if (!pending_frame.fence) {
-      // The frame hasn't been submitted yet, so there's nothing more to do
-      break;
-    }
-
-    int64_t fence_signaled_time = -1;
-    int fence = pending_frame.fence.Get();
-    int sync_result = sync_wait(fence, 0);
-    if (sync_result == 0) {
-      int fence_signaled_result =
-          GetFenceSignaledTimestamp(fence, &fence_info_buffer,
-                                    &fence_signaled_time);
-      if (fence_signaled_result < 0) {
-        ALOGE("Failed getting signaled timestamp from fence");
-      } else {
-        // The frame is finished. Record the duration and move the frame data
-        // from pending_frames_ to finished_frames_.
-        DvrFrameScheduleResult schedule_result = {};
-        schedule_result.vsync_count = pending_frame.scheduled_vsync;
-        schedule_result.scheduled_frame_finish_ns =
-            pending_frame.scheduled_finish_ns;
-        schedule_result.frame_finish_offset_ns =
-            fence_signaled_time - pending_frame.scheduled_finish_ns;
-        finished_frames_.Append(schedule_result);
-        frame_duration_history_.Append(
-            fence_signaled_time - pending_frame.start_ns);
-      }
-      pending_frames_.PopFront();
-    } else {
-      if (errno != ETIME) {
-        ALOGE("sync_wait on frame fence failed. fence=%d errno=%d (%s).",
-              fence, errno, strerror(errno));
-      }
-      break;
-    }
-  }
-}
-
-int FrameHistory::PredictNextFrameVsyncInterval(int64_t vsync_period_ns) const {
-  if (frame_duration_history_.IsEmpty())
-    return kDefaultVsyncIntervalPrediction;
-
-  double total = 0;
-  for (size_t i = 0; i < frame_duration_history_.GetSize(); ++i)
-    total += frame_duration_history_.Get(i);
-  double avg_duration = total / frame_duration_history_.GetSize();
-
-  return std::min(kMaxVsyncIntervalPrediction,
-                  static_cast<int>(avg_duration / vsync_period_ns) + 1);
-}
-
-int FrameHistory::GetPreviousFrameResults(DvrFrameScheduleResult* results,
-                                          int in_result_count) {
-  int out_result_count =
-      std::min(in_result_count, static_cast<int>(finished_frames_.GetSize()));
-  for (int i = 0; i < out_result_count; ++i) {
-    results[i] = finished_frames_.Get(0);
-    finished_frames_.PopFront();
-  }
-  return out_result_count;
-}
-
-uint32_t FrameHistory::GetCurrentFrameVsync() const {
-  return pending_frames_.IsEmpty() ?
-      UINT32_MAX : pending_frames_.Back().scheduled_vsync;
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libdisplay/gl_fenced_flush.cpp b/libs/vr/libdisplay/gl_fenced_flush.cpp
deleted file mode 100644
index c70d554..0000000
--- a/libs/vr/libdisplay/gl_fenced_flush.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-#include "include/private/dvr/gl_fenced_flush.h"
-
-#include <EGL/eglext.h>
-#include <GLES3/gl31.h>
-
-#define ATRACE_TAG ATRACE_TAG_GRAPHICS
-#include <utils/Trace.h>
-
-#include <log/log.h>
-
-using android::pdx::LocalHandle;
-
-namespace android {
-namespace dvr {
-
-LocalHandle CreateGLSyncAndFlush(EGLDisplay display) {
-  ATRACE_NAME("CreateGLSyncAndFlush");
-
-  EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
-                      EGL_NO_NATIVE_FENCE_FD_ANDROID, EGL_NONE};
-  EGLSyncKHR sync_point =
-      eglCreateSyncKHR(display, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
-  glFlush();
-  if (sync_point == EGL_NO_SYNC_KHR) {
-    ALOGE("sync_point == EGL_NO_SYNC_KHR");
-    return LocalHandle();
-  }
-  EGLint fence_fd = eglDupNativeFenceFDANDROID(display, sync_point);
-  eglDestroySyncKHR(display, sync_point);
-
-  if (fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
-    ALOGE("fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID");
-    return LocalHandle();
-  }
-  return LocalHandle(fence_fd);
-}
-
-}  // namespace dvr
-}  // namespace android
diff --git a/libs/vr/libdisplay/graphics.cpp b/libs/vr/libdisplay/graphics.cpp
deleted file mode 100644
index cc140af..0000000
--- a/libs/vr/libdisplay/graphics.cpp
+++ /dev/null
@@ -1,1607 +0,0 @@
-#include <dvr/graphics.h>
-
-#include <inttypes.h>
-#include <sys/timerfd.h>
-#include <array>
-#include <vector>
-
-#include <log/log.h>
-#include <utils/Trace.h>
-
-#ifndef VK_USE_PLATFORM_ANDROID_KHR
-#define VK_USE_PLATFORM_ANDROID_KHR 1
-#endif
-#include <vulkan/vulkan.h>
-
-#include <dvr/dvr_display_types.h>
-#include <pdx/file_handle.h>
-#include <private/dvr/clock_ns.h>
-#include <private/dvr/debug.h>
-#include <private/dvr/frame_history.h>
-#include <private/dvr/gl_fenced_flush.h>
-#include <private/dvr/graphics/vr_gl_extensions.h>
-#include <private/dvr/graphics_private.h>
-#include <private/dvr/late_latch.h>
-#include <private/dvr/native_buffer_queue.h>
-#include <private/dvr/platform_defines.h>
-#include <private/dvr/sensor_constants.h>
-#include <private/dvr/vsync_client.h>
-
-#include <system/window.h>
-
-#ifndef EGL_CONTEXT_MAJOR_VERSION
-#define EGL_CONTEXT_MAJOR_VERSION 0x3098
-#define EGL_CONTEXT_MINOR_VERSION 0x30FB
-#endif
-
-using android::pdx::ErrorStatus;
-using android::pdx::LocalHandle;
-using android::pdx::LocalChannelHandle;
-using android::pdx::Status;
-
-using android::dvr::display::DisplayClient;
-using android::dvr::display::Metrics;
-using android::dvr::display::NativeBufferQueue;
-using android::dvr::display::Surface;
-using android::dvr::display::SurfaceAttribute;
-using android::dvr::display::SurfaceAttributes;
-using android::dvr::display::SurfaceAttributeValue;
-using android::dvr::VSyncClient;
-
-namespace {
-
-// TODO(urbanus): revisit once we have per-platform usage config in place.
-constexpr uint64_t kDefaultDisplaySurfaceUsage =
-    GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET |
-    GRALLOC1_PRODUCER_USAGE_PRIVATE_1 | GRALLOC1_CONSUMER_USAGE_CLIENT_TARGET |
-    GRALLOC1_CONSUMER_USAGE_GPU_TEXTURE;
-constexpr uint32_t kDefaultDisplaySurfaceFormat = HAL_PIXEL_FORMAT_RGBA_8888;
-// TODO(alexst): revisit this count when HW encode is available for casting.
-constexpr size_t kDefaultBufferCount = 4;
-
-// Use with dvrBeginRenderFrame to disable EDS for the current frame.
-constexpr float32x4_t DVR_POSE_NO_EDS = {10.0f, 0.0f, 0.0f, 0.0f};
-
-// Use with dvrBeginRenderFrame to indicate that GPU late-latching is being used
-// for determining the render pose.
-constexpr float32x4_t DVR_POSE_LATE_LATCH = {20.0f, 0.0f, 0.0f, 0.0f};
-
-#ifndef NDEBUG
-
-static const char* GetGlCallbackType(GLenum type) {
-  switch (type) {
-    case GL_DEBUG_TYPE_ERROR_KHR:
-      return "ERROR";
-    case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR:
-      return "DEPRECATED_BEHAVIOR";
-    case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR:
-      return "UNDEFINED_BEHAVIOR";
-    case GL_DEBUG_TYPE_PORTABILITY_KHR:
-      return "PORTABILITY";
-    case GL_DEBUG_TYPE_PERFORMANCE_KHR:
-      return "PERFORMANCE";
-    case GL_DEBUG_TYPE_OTHER_KHR:
-      return "OTHER";
-    default:
-      return "UNKNOWN";
-  }
-}
-
-static void on_gl_error(GLenum /*source*/, GLenum type, GLuint /*id*/,
-                        GLenum severity, GLsizei /*length*/,
-                        const char* message, const void* /*user_param*/) {
-  char msg[400];
-  snprintf(msg, sizeof(msg), "[" __FILE__ ":%u] GL %s: %s", __LINE__,
-           GetGlCallbackType(type), message);
-  switch (severity) {
-    case GL_DEBUG_SEVERITY_LOW_KHR:
-      ALOGI("%s", msg);
-      break;
-    case GL_DEBUG_SEVERITY_MEDIUM_KHR:
-      ALOGW("%s", msg);
-      break;
-    case GL_DEBUG_SEVERITY_HIGH_KHR:
-      ALOGE("%s", msg);
-      break;
-  }
-  fprintf(stderr, "%s\n", msg);
-}
-
-#endif
-
-int DvrToHalSurfaceFormat(int dvr_surface_format) {
-  switch (dvr_surface_format) {
-    case DVR_SURFACE_FORMAT_RGBA_8888:
-      return HAL_PIXEL_FORMAT_RGBA_8888;
-    case DVR_SURFACE_FORMAT_RGB_565:
-      return HAL_PIXEL_FORMAT_RGB_565;
-    default:
-      return HAL_PIXEL_FORMAT_RGBA_8888;
-  }
-}
-
-int SelectEGLConfig(EGLDisplay dpy, EGLint* attr, unsigned format,
-                    EGLConfig* config) {
-  std::array<EGLint, 4> desired_rgba;
-  switch (format) {
-    case HAL_PIXEL_FORMAT_RGBA_8888:
-    case HAL_PIXEL_FORMAT_BGRA_8888:
-      desired_rgba = {{8, 8, 8, 8}};
-      break;
-    case HAL_PIXEL_FORMAT_RGB_565:
-      desired_rgba = {{5, 6, 5, 0}};
-      break;
-    default:
-      ALOGE("Unsupported framebuffer pixel format %d", format);
-      return -1;
-  }
-
-  EGLint max_configs = 0;
-  if (eglGetConfigs(dpy, NULL, 0, &max_configs) == EGL_FALSE) {
-    ALOGE("No EGL configurations available?!");
-    return -1;
-  }
-
-  std::vector<EGLConfig> configs(max_configs);
-
-  EGLint num_configs;
-  if (eglChooseConfig(dpy, attr, &configs[0], max_configs, &num_configs) ==
-      EGL_FALSE) {
-    ALOGE("eglChooseConfig failed");
-    return -1;
-  }
-
-  std::array<EGLint, 4> config_rgba;
-  for (int i = 0; i < num_configs; i++) {
-    eglGetConfigAttrib(dpy, configs[i], EGL_RED_SIZE, &config_rgba[0]);
-    eglGetConfigAttrib(dpy, configs[i], EGL_GREEN_SIZE, &config_rgba[1]);
-    eglGetConfigAttrib(dpy, configs[i], EGL_BLUE_SIZE, &config_rgba[2]);
-    eglGetConfigAttrib(dpy, configs[i], EGL_ALPHA_SIZE, &config_rgba[3]);
-    if (config_rgba == desired_rgba) {
-      *config = configs[i];
-      return 0;
-    }
-  }
-
-  ALOGE("Cannot find a matching EGL config");
-  return -1;
-}
-
-void DestroyEglContext(EGLDisplay egl_display, EGLContext* egl_context) {
-  if (*egl_context != EGL_NO_CONTEXT) {
-    eglDestroyContext(egl_display, *egl_context);
-    *egl_context = EGL_NO_CONTEXT;
-  }
-}
-
-// Perform internal initialization. A GL context must be bound to the current
-// thread.
-// @param internally_created_context True if we created and own the GL context,
-//        false if it was supplied by the application.
-// @return 0 if init was successful, or a negative error code on failure.
-int InitGl(bool internally_created_context) {
-  EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
-  if (egl_display == EGL_NO_DISPLAY) {
-    ALOGE("eglGetDisplay failed");
-    return -EINVAL;
-  }
-
-  EGLContext egl_context = eglGetCurrentContext();
-  if (egl_context == EGL_NO_CONTEXT) {
-    ALOGE("No GL context bound");
-    return -EINVAL;
-  }
-
-  glGetError();  // Clear the error state
-  GLint major_version, minor_version;
-  glGetIntegerv(GL_MAJOR_VERSION, &major_version);
-  glGetIntegerv(GL_MINOR_VERSION, &minor_version);
-  if (glGetError() != GL_NO_ERROR) {
-    // GL_MAJOR_VERSION and GL_MINOR_VERSION were added in GLES 3. If we get an
-    // error querying them it's almost certainly because it's GLES 1 or 2.
-    ALOGE("Error getting GL version. Must be GLES 3.2 or greater.");
-    return -EINVAL;
-  }
-
-  if (major_version < 3 || (major_version == 3 && minor_version < 2)) {
-    ALOGE("Invalid GL version: %d.%d. Must be GLES 3.2 or greater.",
-          major_version, minor_version);
-    return -EINVAL;
-  }
-
-#ifndef NDEBUG
-  if (internally_created_context) {
-    // Enable verbose GL debug output.
-    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR);
-    glDebugMessageCallbackKHR(on_gl_error, NULL);
-    GLuint unused_ids = 0;
-    glDebugMessageControlKHR(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0,
-                             &unused_ids, GL_TRUE);
-  }
-#else
-  (void)internally_created_context;
-#endif
-
-  load_gl_extensions();
-  return 0;
-}
-
-int CreateEglContext(EGLDisplay egl_display, DvrSurfaceParameter* parameters,
-                     EGLContext* egl_context) {
-  *egl_context = EGL_NO_CONTEXT;
-
-  EGLint major, minor;
-  if (!eglInitialize(egl_display, &major, &minor)) {
-    ALOGE("Failed to initialize EGL");
-    return -ENXIO;
-  }
-
-  ALOGI("EGL version: %d.%d\n", major, minor);
-
-  int buffer_format = kDefaultDisplaySurfaceFormat;
-
-  for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-    switch (p->key) {
-      case DVR_SURFACE_PARAMETER_FORMAT_IN:
-        buffer_format = DvrToHalSurfaceFormat(p->value);
-        break;
-    }
-  }
-
-  EGLint config_attrs[] = {EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
-                           EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE};
-  EGLConfig config = {0};
-
-  int ret = SelectEGLConfig(egl_display, config_attrs, buffer_format, &config);
-  if (ret < 0)
-    return ret;
-
-  ALOGI("EGL SelectEGLConfig ok.\n");
-
-  EGLint context_attrs[] = {EGL_CONTEXT_MAJOR_VERSION,
-                            3,
-                            EGL_CONTEXT_MINOR_VERSION,
-                            2,
-#ifndef NDEBUG
-                            EGL_CONTEXT_FLAGS_KHR,
-                            EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR,
-#endif
-                            EGL_NONE};
-
-  *egl_context =
-      eglCreateContext(egl_display, config, EGL_NO_CONTEXT, context_attrs);
-  if (*egl_context == EGL_NO_CONTEXT) {
-    ALOGE("eglCreateContext failed");
-    return -ENXIO;
-  }
-
-  ALOGI("eglCreateContext ok.\n");
-
-  if (!eglMakeCurrent(egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
-                      *egl_context)) {
-    ALOGE("eglMakeCurrent failed");
-    DestroyEglContext(egl_display, egl_context);
-    return -EINVAL;
-  }
-
-  return 0;
-}
-
-// Utility structure to hold info related to creating a surface.
-struct SurfaceResult {
-  std::shared_ptr<Surface> surface;
-  Metrics metrics;
-  uint32_t width;
-  uint32_t height;
-  uint32_t format;
-  uint64_t usage;
-  size_t capacity;
-  int geometry;
-  bool direct_surface;
-};
-
-Status<std::tuple<std::shared_ptr<android::dvr::ProducerQueue>,
-                  std::shared_ptr<android::dvr::BufferProducer>,
-                  volatile DisplaySurfaceMetadata*>>
-CreateMetadataBuffer(const std::shared_ptr<Surface>& surface,
-                     bool direct_surface) {
-  std::shared_ptr<android::dvr::ProducerQueue> queue;
-  std::shared_ptr<android::dvr::BufferProducer> buffer;
-
-  if (!direct_surface) {
-    auto queue_status = surface->CreateQueue(
-        sizeof(DisplaySurfaceMetadata), 1, 1, HAL_PIXEL_FORMAT_BLOB,
-        GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET |
-            GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN |
-            GRALLOC1_CONSUMER_USAGE_GPU_DATA_BUFFER,
-        1);
-    if (!queue_status) {
-      ALOGE("CreateMetadataBuffer: Failed to create queue: %s",
-            queue_status.GetErrorMessage().c_str());
-      return queue_status.error_status();
-    }
-
-    queue = queue_status.take();
-    LocalHandle fence;
-    size_t slot;
-    auto buffer_status = queue->Dequeue(-1, &slot, &fence);
-    if (!buffer_status) {
-      ALOGE("CreateMetadataBuffer: Failed to dequeue buffer: %s",
-            buffer_status.GetErrorMessage().c_str());
-      return buffer_status.error_status();
-    }
-    buffer = buffer_status.take();
-  } else {
-    buffer = android::dvr::BufferProducer::CreateUncachedBlob(
-        sizeof(DisplaySurfaceMetadata));
-    if (!buffer) {
-      ALOGE("CreateMetadataBuffer: Failed to create stand-in buffer!");
-      return ErrorStatus(ENOMEM);
-    }
-  }
-
-  void* address = nullptr;
-  int ret =
-      buffer->GetBlobReadWritePointer(sizeof(DisplaySurfaceMetadata), &address);
-
-  if (ret < 0) {
-    ALOGE("CreateMetadataBuffer: Failed to map buffer: %s", strerror(-ret));
-    return ErrorStatus(-ret);
-  }
-
-  // Post the buffer so that the compositor can retrieve it from the consumer
-  // queue.
-  ret = buffer->Post<void>(LocalHandle{});
-  if (ret < 0) {
-    ALOGE("CreateMetadataBuffer: Failed to post buffer: %s", strerror(-ret));
-    return ErrorStatus(-ret);
-  }
-
-  ALOGD_IF(TRACE, "CreateMetadataBuffer: queue_id=%d buffer_id=%d address=%p",
-           queue ? queue->id() : -1, buffer->id(), address);
-  return {{std::move(queue), std::move(buffer),
-           static_cast<DisplaySurfaceMetadata*>(address)}};
-}
-
-}  // anonymous namespace
-
-Status<SurfaceResult> CreateSurface(struct DvrSurfaceParameter* parameters) {
-  int error;
-  auto client = DisplayClient::Create(&error);
-  if (!client) {
-    ALOGE("CreateApplicationSurface: Failed to create display client!");
-    return ErrorStatus(error);
-  }
-
-  auto metrics_status = client->GetDisplayMetrics();
-  if (!metrics_status) {
-    ALOGE("CreateApplicationSurface: Failed to get display metrics: %s",
-          metrics_status.GetErrorMessage().c_str());
-    return metrics_status.error_status();
-  }
-
-  // Parameters that may be modified by the parameters array. Some of these are
-  // here for future expansion.
-
-  uint32_t request_width = metrics_status.get().display_width;
-  uint32_t request_height = metrics_status.get().display_width;
-  uint32_t request_format = kDefaultDisplaySurfaceFormat;
-  uint64_t request_usage = kDefaultDisplaySurfaceUsage;
-  size_t request_capacity = kDefaultBufferCount;
-  int request_geometry = DVR_SURFACE_GEOMETRY_SINGLE;
-  bool disable_distortion = false;
-  bool disable_stabilization = false;
-  bool disable_cac = false;
-  bool request_visible = false;
-  bool vertical_flip = false;
-  bool direct_surface = false;
-  int request_z_order = 0;
-
-  // Handle parameter inputs.
-  for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-    switch (p->key) {
-      case DVR_SURFACE_PARAMETER_DISABLE_DISTORTION_IN:
-        disable_distortion = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_DISABLE_STABILIZATION_IN:
-        disable_stabilization = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_DISABLE_CAC_IN:
-        disable_cac = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_VISIBLE_IN:
-        request_visible = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_Z_ORDER_IN:
-        request_z_order = p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_VERTICAL_FLIP_IN:
-        vertical_flip = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_DIRECT_IN:
-        direct_surface = !!p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_WIDTH_IN:
-        request_width = p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_HEIGHT_IN:
-        request_height = p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_FORMAT_IN:
-        request_format = p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_GEOMETRY_IN:
-        request_geometry = p->value;
-        break;
-      case DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN:
-      case DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN:
-      case DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT:
-      case DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT:
-      case DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT:
-      case DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT:
-      case DVR_SURFACE_PARAMETER_INTER_LENS_METERS_OUT:
-      case DVR_SURFACE_PARAMETER_LEFT_FOV_LRBT_OUT:
-      case DVR_SURFACE_PARAMETER_RIGHT_FOV_LRBT_OUT:
-      case DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT:
-      case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT:
-      case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT:
-      case DVR_SURFACE_PARAMETER_GRAPHICS_API_IN:
-      case DVR_SURFACE_PARAMETER_VK_INSTANCE_IN:
-      case DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN:
-      case DVR_SURFACE_PARAMETER_VK_DEVICE_IN:
-      case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN:
-      case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN:
-      case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT:
-      case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT:
-        break;
-      default:
-        ALOGE(
-            "CreateSurface: Invalid display surface parameter: key=%d "
-            "value=%" PRId64,
-            p->key, p->value);
-        return ErrorStatus(EINVAL);
-    }
-  }
-
-  // TODO(eieio): Setup a "surface flags" attribute based on the surface
-  // parameters gathered above.
-  SurfaceAttributes surface_attributes;
-
-  surface_attributes[SurfaceAttribute::Direct] = direct_surface;
-  surface_attributes[SurfaceAttribute::Visible] = request_visible;
-  surface_attributes[SurfaceAttribute::ZOrder] = request_z_order;
-
-  auto surface_status = Surface::CreateSurface(surface_attributes);
-  if (!surface_status) {
-    ALOGE("CreateSurface: Failed to create surface: %s",
-          surface_status.GetErrorMessage().c_str());
-    return surface_status.error_status();
-  }
-
-  return {{surface_status.take(), metrics_status.get(), request_width,
-           request_height, request_format, request_usage, request_capacity,
-           request_geometry, direct_surface}};
-}
-
-// TODO(hendrikw): When we remove the calls to this in native_window.cpp, move
-// this back into the anonymous namespace
-Status<SurfaceResult> CreateApplicationSurface(
-    struct DvrSurfaceParameter* parameters) {
-  auto surface_status = CreateSurface(parameters);
-  if (!surface_status)
-    return surface_status;
-
-  // Handle parameter output requests down here so we can return surface info.
-  for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-    switch (p->key) {
-      case DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT:
-        *static_cast<int32_t*>(p->value_out) =
-            surface_status.get().metrics.display_width;
-        break;
-      case DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT:
-        *static_cast<int32_t*>(p->value_out) =
-            surface_status.get().metrics.display_height;
-        break;
-      case DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT:
-        *static_cast<uint64_t*>(p->value_out) =
-            surface_status.get().metrics.vsync_period_ns;
-        break;
-      case DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT:
-        *static_cast<uint32_t*>(p->value_out) = surface_status.get().width;
-        break;
-      case DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT:
-        *static_cast<uint32_t*>(p->value_out) = surface_status.get().height;
-        break;
-
-      default:
-        break;
-    }
-  }
-
-  return surface_status;
-}
-
-extern "C" int dvrGetNativeDisplayDimensions(int* display_width,
-                                             int* display_height) {
-  int error = 0;
-  auto client = DisplayClient::Create(&error);
-  if (!client) {
-    ALOGE("dvrGetNativeDisplayDimensions: Failed to create display client!");
-    return -error;
-  }
-
-  auto metrics_status = client->GetDisplayMetrics();
-  if (!metrics_status) {
-    ALOGE("dvrGetNativeDisplayDimensions: Failed to get display metrics: %s",
-          metrics_status.GetErrorMessage().c_str());
-    return -metrics_status.error();
-  }
-
-  *display_width = static_cast<int>(metrics_status.get().display_width);
-  *display_height = static_cast<int>(metrics_status.get().display_height);
-  return 0;
-}
-
-struct DvrGraphicsContext : public android::ANativeObjectBase<
-                                ANativeWindow, DvrGraphicsContext,
-                                android::LightRefBase<DvrGraphicsContext>> {
- public:
-  DvrGraphicsContext();
-  ~DvrGraphicsContext();
-
-  int graphics_api;  // DVR_SURFACE_GRAPHICS_API_*
-
-  // GL specific members.
-  struct {
-    EGLDisplay egl_display;
-    EGLContext egl_context;
-    bool owns_egl_context;
-    GLuint texture_id[kSurfaceViewMaxCount];
-    int texture_count;
-    GLenum texture_target_type;
-  } gl;
-
-  // VK specific members
-  struct {
-    // These objects are passed in by the application, and are NOT owned
-    // by the context.
-    VkInstance instance;
-    VkPhysicalDevice physical_device;
-    VkDevice device;
-    VkQueue present_queue;
-    uint32_t present_queue_family;
-    const VkAllocationCallbacks* allocation_callbacks;
-    // These objects are owned by the context.
-    ANativeWindow* window;
-    VkSurfaceKHR surface;
-    VkSwapchainKHR swapchain;
-    std::vector<VkImage> swapchain_images;
-    std::vector<VkImageView> swapchain_image_views;
-  } vk;
-
-  // Display surface, metrics, and buffer management members.
-  std::shared_ptr<Surface> display_surface;
-  uint32_t width;
-  uint32_t height;
-  uint32_t format;
-  Metrics display_metrics;
-  std::unique_ptr<NativeBufferQueue> buffer_queue;
-  android::dvr::NativeBufferProducer* current_buffer;
-  bool buffer_already_posted;
-
-  // Synchronization members.
-  std::unique_ptr<android::dvr::VSyncClient> vsync_client;
-  LocalHandle timerfd;
-
-  android::dvr::FrameHistory frame_history;
-
-  // Metadata queue and buffer.
-  // TODO(eieio): Remove the queue once one-off buffers are supported as a
-  // surface primitive element.
-  std::shared_ptr<android::dvr::ProducerQueue> metadata_queue;
-  std::shared_ptr<android::dvr::BufferProducer> metadata_buffer;
-  // Mapped surface metadata (ie: for pose delivery with presented frames).
-  volatile DisplaySurfaceMetadata* surface_metadata;
-
-  // LateLatch support.
-  std::unique_ptr<android::dvr::LateLatch> late_latch;
-
- private:
-  // ANativeWindow function implementations
-  std::mutex lock_;
-  int Post(android::dvr::NativeBufferProducer* buffer, int fence_fd);
-  static int SetSwapInterval(ANativeWindow* window, int interval);
-  static int DequeueBuffer(ANativeWindow* window, ANativeWindowBuffer** buffer,
-                           int* fence_fd);
-  static int QueueBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
-                         int fence_fd);
-  static int CancelBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
-                          int fence_fd);
-  static int Query(const ANativeWindow* window, int what, int* value);
-  static int Perform(ANativeWindow* window, int operation, ...);
-  static int DequeueBuffer_DEPRECATED(ANativeWindow* window,
-                                      ANativeWindowBuffer** buffer);
-  static int CancelBuffer_DEPRECATED(ANativeWindow* window,
-                                     ANativeWindowBuffer* buffer);
-  static int QueueBuffer_DEPRECATED(ANativeWindow* window,
-                                    ANativeWindowBuffer* buffer);
-  static int LockBuffer_DEPRECATED(ANativeWindow* window,
-                                   ANativeWindowBuffer* buffer);
-
-  DvrGraphicsContext(const DvrGraphicsContext&) = delete;
-  void operator=(const DvrGraphicsContext&) = delete;
-};
-
-DvrGraphicsContext::DvrGraphicsContext()
-    : graphics_api(DVR_GRAPHICS_API_GLES),
-      gl{},
-      vk{},
-      current_buffer(nullptr),
-      buffer_already_posted(false),
-      surface_metadata(nullptr) {
-  gl.egl_display = EGL_NO_DISPLAY;
-  gl.egl_context = EGL_NO_CONTEXT;
-  gl.owns_egl_context = true;
-  gl.texture_target_type = GL_TEXTURE_2D;
-
-  ANativeWindow::setSwapInterval = SetSwapInterval;
-  ANativeWindow::dequeueBuffer = DequeueBuffer;
-  ANativeWindow::cancelBuffer = CancelBuffer;
-  ANativeWindow::queueBuffer = QueueBuffer;
-  ANativeWindow::query = Query;
-  ANativeWindow::perform = Perform;
-
-  ANativeWindow::dequeueBuffer_DEPRECATED = DequeueBuffer_DEPRECATED;
-  ANativeWindow::cancelBuffer_DEPRECATED = CancelBuffer_DEPRECATED;
-  ANativeWindow::lockBuffer_DEPRECATED = LockBuffer_DEPRECATED;
-  ANativeWindow::queueBuffer_DEPRECATED = QueueBuffer_DEPRECATED;
-}
-
-DvrGraphicsContext::~DvrGraphicsContext() {
-  if (graphics_api == DVR_GRAPHICS_API_GLES) {
-    glDeleteTextures(gl.texture_count, gl.texture_id);
-    if (gl.owns_egl_context)
-      DestroyEglContext(gl.egl_display, &gl.egl_context);
-  } else if (graphics_api == DVR_GRAPHICS_API_VULKAN) {
-    if (vk.swapchain != VK_NULL_HANDLE) {
-      for (auto view : vk.swapchain_image_views) {
-        vkDestroyImageView(vk.device, view, vk.allocation_callbacks);
-      }
-      vkDestroySwapchainKHR(vk.device, vk.swapchain, vk.allocation_callbacks);
-      vkDestroySurfaceKHR(vk.instance, vk.surface, vk.allocation_callbacks);
-      delete vk.window;
-    }
-  }
-}
-
-int dvrGraphicsContextCreate(struct DvrSurfaceParameter* parameters,
-                             DvrGraphicsContext** return_graphics_context) {
-  auto context = std::make_unique<DvrGraphicsContext>();
-
-  // See whether we're using GL or Vulkan
-  for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-    switch (p->key) {
-      case DVR_SURFACE_PARAMETER_GRAPHICS_API_IN:
-        context->graphics_api = p->value;
-        break;
-    }
-  }
-
-  if (context->graphics_api == DVR_GRAPHICS_API_GLES) {
-    context->gl.egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
-    if (context->gl.egl_display == EGL_NO_DISPLAY) {
-      ALOGE("eglGetDisplay failed");
-      return -ENXIO;
-    }
-
-    // See if we should create a GL context
-    for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-      switch (p->key) {
-        case DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN:
-          context->gl.owns_egl_context = p->value != 0;
-          break;
-      }
-    }
-
-    if (context->gl.owns_egl_context) {
-      int ret = CreateEglContext(context->gl.egl_display, parameters,
-                                 &context->gl.egl_context);
-      if (ret < 0)
-        return ret;
-    } else {
-      context->gl.egl_context = eglGetCurrentContext();
-    }
-
-    int ret = InitGl(context->gl.owns_egl_context);
-    if (ret < 0)
-      return ret;
-  } else if (context->graphics_api == DVR_GRAPHICS_API_VULKAN) {
-    for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-      switch (p->key) {
-        case DVR_SURFACE_PARAMETER_VK_INSTANCE_IN:
-          context->vk.instance = reinterpret_cast<VkInstance>(p->value);
-          break;
-        case DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN:
-          context->vk.physical_device =
-              reinterpret_cast<VkPhysicalDevice>(p->value);
-          break;
-        case DVR_SURFACE_PARAMETER_VK_DEVICE_IN:
-          context->vk.device = reinterpret_cast<VkDevice>(p->value);
-          break;
-        case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN:
-          context->vk.present_queue = reinterpret_cast<VkQueue>(p->value);
-          break;
-        case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN:
-          context->vk.present_queue_family = static_cast<uint32_t>(p->value);
-          break;
-      }
-    }
-  } else {
-    ALOGE("Error: invalid graphics API type");
-    return -EINVAL;
-  }
-
-  auto surface_status = CreateApplicationSurface(parameters);
-  if (!surface_status) {
-    ALOGE("dvrGraphicsContextCreate: Failed to create surface: %s",
-          surface_status.GetErrorMessage().c_str());
-    return -surface_status.error();
-  }
-
-  auto surface_result = surface_status.take();
-
-  context->display_surface = surface_result.surface;
-  context->display_metrics = surface_result.metrics;
-  context->width = surface_result.width;
-  context->height = surface_result.height;
-  context->format = surface_result.format;
-
-  // Create an empty queue. NativeBufferQueue allocates the buffers for this
-  // queue.
-  auto queue_status = context->display_surface->CreateQueue();
-  if (!queue_status) {
-    ALOGE("dvrGraphicsContextCreate: Failed to create queue: %s",
-          queue_status.GetErrorMessage().c_str());
-    return -queue_status.error();
-  }
-
-  context->buffer_queue.reset(new NativeBufferQueue(
-      context->gl.egl_display, queue_status.take(), surface_result.width,
-      surface_result.height, surface_result.format, surface_result.usage,
-      surface_result.capacity));
-
-  // Create the metadata buffer.
-  auto metadata_status = CreateMetadataBuffer(context->display_surface,
-                                              surface_result.direct_surface);
-  if (!metadata_status) {
-    ALOGE("dvrGraphicsContextCreate: Failed to create metadata buffer: %s",
-          metadata_status.GetErrorMessage().c_str());
-    return -metadata_status.error();
-  }
-  std::tie(context->metadata_queue, context->metadata_buffer,
-           context->surface_metadata) = metadata_status.take();
-
-  // The way the call sequence works we need 1 more than the buffer queue
-  // capacity to store data for all pending frames
-  context->frame_history.Reset(context->buffer_queue->capacity() + 1);
-
-  context->vsync_client = VSyncClient::Create();
-  if (!context->vsync_client) {
-    ALOGE("dvrGraphicsContextCreate: failed to create vsync client");
-    return -ECOMM;
-  }
-
-  context->timerfd.Reset(timerfd_create(CLOCK_MONOTONIC, 0));
-  if (!context->timerfd) {
-    ALOGE("dvrGraphicsContextCreate: timerfd_create failed because: %s",
-          strerror(errno));
-    return -EPERM;
-  }
-
-  if (context->graphics_api == DVR_GRAPHICS_API_GLES) {
-    context->gl.texture_count =
-        (surface_result.geometry == DVR_SURFACE_GEOMETRY_SEPARATE_2) ? 2 : 1;
-
-    // Create the GL textures.
-    glGenTextures(context->gl.texture_count, context->gl.texture_id);
-
-    // We must make sure that we have at least one buffer allocated at this time
-    // so that anyone who tries to bind an FBO to context->texture_id
-    // will not get an incomplete buffer.
-    context->current_buffer = context->buffer_queue->Dequeue();
-    LOG_ALWAYS_FATAL_IF(context->gl.texture_count != 1);
-    for (int i = 0; i < context->gl.texture_count; ++i) {
-      glBindTexture(context->gl.texture_target_type, context->gl.texture_id[i]);
-      glEGLImageTargetTexture2DOES(context->gl.texture_target_type,
-                                   context->current_buffer->image_khr(i));
-    }
-    glBindTexture(context->gl.texture_target_type, 0);
-    CHECK_GL();
-
-    bool is_late_latch = false;
-
-    // Pass back the texture target type and id.
-    for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-      switch (p->key) {
-        case DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN:
-          is_late_latch = !!p->value;
-          break;
-        case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT:
-          *static_cast<GLenum*>(p->value_out) = context->gl.texture_target_type;
-          break;
-        case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT:
-          for (int i = 0; i < context->gl.texture_count; ++i) {
-            *(static_cast<GLuint*>(p->value_out) + i) =
-                context->gl.texture_id[i];
-          }
-          break;
-      }
-    }
-
-    // Initialize late latch.
-    if (is_late_latch) {
-      LocalHandle fd = context->metadata_buffer->GetBlobFd();
-      context->late_latch.reset(
-          new android::dvr::LateLatch(true, std::move(fd)));
-    }
-  } else if (context->graphics_api == DVR_GRAPHICS_API_VULKAN) {
-    VkResult result = VK_SUCCESS;
-    // Create a VkSurfaceKHR from the ANativeWindow.
-    VkAndroidSurfaceCreateInfoKHR android_surface_ci = {};
-    android_surface_ci.sType =
-        VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
-    android_surface_ci.window = context.get();
-    result = vkCreateAndroidSurfaceKHR(
-        context->vk.instance, &android_surface_ci,
-        context->vk.allocation_callbacks, &context->vk.surface);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    VkBool32 surface_supports_present = VK_FALSE;
-    result = vkGetPhysicalDeviceSurfaceSupportKHR(
-        context->vk.physical_device, context->vk.present_queue_family,
-        context->vk.surface, &surface_supports_present);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    if (!surface_supports_present) {
-      ALOGE("Error: provided queue family (%u) does not support presentation",
-            context->vk.present_queue_family);
-      return -EPERM;
-    }
-    VkSurfaceCapabilitiesKHR surface_capabilities = {};
-    result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
-        context->vk.physical_device, context->vk.surface,
-        &surface_capabilities);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    // Determine the swapchain image format.
-    uint32_t device_surface_format_count = 0;
-    result = vkGetPhysicalDeviceSurfaceFormatsKHR(
-        context->vk.physical_device, context->vk.surface,
-        &device_surface_format_count, nullptr);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    std::vector<VkSurfaceFormatKHR> device_surface_formats(
-        device_surface_format_count);
-    result = vkGetPhysicalDeviceSurfaceFormatsKHR(
-        context->vk.physical_device, context->vk.surface,
-        &device_surface_format_count, device_surface_formats.data());
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    LOG_ALWAYS_FATAL_IF(device_surface_format_count == 0U);
-    LOG_ALWAYS_FATAL_IF(device_surface_formats[0].format ==
-                        VK_FORMAT_UNDEFINED);
-    VkSurfaceFormatKHR present_surface_format = device_surface_formats[0];
-    // Determine the swapchain present mode.
-    // TODO(cort): query device_present_modes to make sure MAILBOX is supported.
-    // But according to libvulkan, it is.
-    uint32_t device_present_mode_count = 0;
-    result = vkGetPhysicalDeviceSurfacePresentModesKHR(
-        context->vk.physical_device, context->vk.surface,
-        &device_present_mode_count, nullptr);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    std::vector<VkPresentModeKHR> device_present_modes(
-        device_present_mode_count);
-    result = vkGetPhysicalDeviceSurfacePresentModesKHR(
-        context->vk.physical_device, context->vk.surface,
-        &device_present_mode_count, device_present_modes.data());
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    VkPresentModeKHR present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
-    // Extract presentation surface extents, image count, transform, usages,
-    // etc.
-    LOG_ALWAYS_FATAL_IF(
-        static_cast<int>(surface_capabilities.currentExtent.width) == -1 ||
-        static_cast<int>(surface_capabilities.currentExtent.height) == -1);
-    VkExtent2D swapchain_extent = surface_capabilities.currentExtent;
-
-    uint32_t desired_image_count = surface_capabilities.minImageCount;
-    if (surface_capabilities.maxImageCount > 0 &&
-        desired_image_count > surface_capabilities.maxImageCount) {
-      desired_image_count = surface_capabilities.maxImageCount;
-    }
-    VkSurfaceTransformFlagBitsKHR surface_transform =
-        surface_capabilities.currentTransform;
-    VkImageUsageFlags image_usage_flags =
-        surface_capabilities.supportedUsageFlags;
-    LOG_ALWAYS_FATAL_IF(surface_capabilities.supportedCompositeAlpha ==
-                        static_cast<VkFlags>(0));
-    VkCompositeAlphaFlagBitsKHR composite_alpha =
-        VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-    if (!(surface_capabilities.supportedCompositeAlpha &
-          VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR)) {
-      composite_alpha = VkCompositeAlphaFlagBitsKHR(
-          static_cast<int>(surface_capabilities.supportedCompositeAlpha) &
-          -static_cast<int>(surface_capabilities.supportedCompositeAlpha));
-    }
-    // Create VkSwapchainKHR
-    VkSwapchainCreateInfoKHR swapchain_ci = {};
-    swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
-    swapchain_ci.pNext = nullptr;
-    swapchain_ci.surface = context->vk.surface;
-    swapchain_ci.minImageCount = desired_image_count;
-    swapchain_ci.imageFormat = present_surface_format.format;
-    swapchain_ci.imageColorSpace = present_surface_format.colorSpace;
-    swapchain_ci.imageExtent.width = swapchain_extent.width;
-    swapchain_ci.imageExtent.height = swapchain_extent.height;
-    swapchain_ci.imageUsage = image_usage_flags;
-    swapchain_ci.preTransform = surface_transform;
-    swapchain_ci.compositeAlpha = composite_alpha;
-    swapchain_ci.imageArrayLayers = 1;
-    swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
-    swapchain_ci.queueFamilyIndexCount = 0;
-    swapchain_ci.pQueueFamilyIndices = nullptr;
-    swapchain_ci.presentMode = present_mode;
-    swapchain_ci.clipped = VK_TRUE;
-    swapchain_ci.oldSwapchain = VK_NULL_HANDLE;
-    result = vkCreateSwapchainKHR(context->vk.device, &swapchain_ci,
-                                  context->vk.allocation_callbacks,
-                                  &context->vk.swapchain);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    // Create swapchain image views
-    uint32_t image_count = 0;
-    result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
-                                     &image_count, nullptr);
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    LOG_ALWAYS_FATAL_IF(image_count == 0U);
-    context->vk.swapchain_images.resize(image_count);
-    result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
-                                     &image_count,
-                                     context->vk.swapchain_images.data());
-    LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    context->vk.swapchain_image_views.resize(image_count);
-    VkImageViewCreateInfo image_view_ci = {};
-    image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-    image_view_ci.pNext = nullptr;
-    image_view_ci.flags = 0;
-    image_view_ci.format = swapchain_ci.imageFormat;
-    image_view_ci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
-    image_view_ci.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
-    image_view_ci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
-    image_view_ci.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
-    image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
-    image_view_ci.subresourceRange.baseMipLevel = 0;
-    image_view_ci.subresourceRange.levelCount = 1;
-    image_view_ci.subresourceRange.baseArrayLayer = 0;
-    image_view_ci.subresourceRange.layerCount = 1;
-    image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
-    image_view_ci.image = VK_NULL_HANDLE;  // filled in below
-    for (uint32_t i = 0; i < image_count; ++i) {
-      image_view_ci.image = context->vk.swapchain_images[i];
-      result = vkCreateImageView(context->vk.device, &image_view_ci,
-                                 context->vk.allocation_callbacks,
-                                 &context->vk.swapchain_image_views[i]);
-      LOG_ALWAYS_FATAL_IF(result != VK_SUCCESS);
-    }
-    // Fill in any requested output parameters.
-    for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
-      switch (p->key) {
-        case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT:
-          *static_cast<uint32_t*>(p->value_out) = image_count;
-          break;
-        case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT:
-          *static_cast<VkFormat*>(p->value_out) = swapchain_ci.imageFormat;
-          break;
-      }
-    }
-  }
-
-  *return_graphics_context = context.release();
-  return 0;
-}
-
-void dvrGraphicsContextDestroy(DvrGraphicsContext* graphics_context) {
-  delete graphics_context;
-}
-
-// ANativeWindow function implementations. These should only be used
-// by the Vulkan path.
-int DvrGraphicsContext::Post(android::dvr::NativeBufferProducer* buffer,
-                             int fence_fd) {
-  LOG_ALWAYS_FATAL_IF(graphics_api != DVR_GRAPHICS_API_VULKAN);
-  ATRACE_NAME(__PRETTY_FUNCTION__);
-  ALOGI_IF(TRACE, "DvrGraphicsContext::Post: buffer_id=%d, fence_fd=%d",
-           buffer->buffer()->id(), fence_fd);
-  ALOGW_IF(!display_surface->visible(),
-           "DvrGraphicsContext::Post: Posting buffer on invisible surface!!!");
-  // The NativeBufferProducer closes the fence fd, so dup it for tracking in the
-  // frame history.
-  frame_history.OnFrameSubmit(LocalHandle::AsDuplicate(fence_fd));
-  int result = buffer->Post(fence_fd, 0);
-  return result;
-}
-
-int DvrGraphicsContext::SetSwapInterval(ANativeWindow* window, int interval) {
-  ALOGI_IF(TRACE, "SetSwapInterval: window=%p interval=%d", window, interval);
-  DvrGraphicsContext* self = getSelf(window);
-  (void)self;
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  return android::NO_ERROR;
-}
-
-int DvrGraphicsContext::DequeueBuffer(ANativeWindow* window,
-                                      ANativeWindowBuffer** buffer,
-                                      int* fence_fd) {
-  ATRACE_NAME(__PRETTY_FUNCTION__);
-
-  DvrGraphicsContext* self = getSelf(window);
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  std::lock_guard<std::mutex> autolock(self->lock_);
-
-  if (!self->current_buffer) {
-    self->current_buffer = self->buffer_queue->Dequeue();
-  }
-  ATRACE_ASYNC_BEGIN("BufferDraw", self->current_buffer->buffer()->id());
-  *fence_fd = self->current_buffer->ClaimReleaseFence().Release();
-  *buffer = self->current_buffer;
-
-  ALOGI_IF(TRACE, "DvrGraphicsContext::DequeueBuffer: fence_fd=%d", *fence_fd);
-  return android::NO_ERROR;
-}
-
-int DvrGraphicsContext::QueueBuffer(ANativeWindow* window,
-                                    ANativeWindowBuffer* buffer, int fence_fd) {
-  ATRACE_NAME("NativeWindow::QueueBuffer");
-  ALOGI_IF(TRACE, "NativeWindow::QueueBuffer: fence_fd=%d", fence_fd);
-
-  DvrGraphicsContext* self = getSelf(window);
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  std::lock_guard<std::mutex> autolock(self->lock_);
-
-  android::dvr::NativeBufferProducer* native_buffer =
-      static_cast<android::dvr::NativeBufferProducer*>(buffer);
-  ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
-  bool do_post = true;
-  if (self->buffer_already_posted) {
-    // Check that the buffer is the one we expect, but handle it if this happens
-    // in production by allowing this buffer to post on top of the previous one.
-    LOG_FATAL_IF(native_buffer != self->current_buffer);
-    if (native_buffer == self->current_buffer) {
-      do_post = false;
-      if (fence_fd >= 0)
-        close(fence_fd);
-    }
-  }
-  if (do_post) {
-    ATRACE_ASYNC_BEGIN("BufferPost", native_buffer->buffer()->id());
-    self->Post(native_buffer, fence_fd);
-  }
-  self->buffer_already_posted = false;
-  self->current_buffer = nullptr;
-
-  return android::NO_ERROR;
-}
-
-int DvrGraphicsContext::CancelBuffer(ANativeWindow* window,
-                                     ANativeWindowBuffer* buffer,
-                                     int fence_fd) {
-  ATRACE_NAME("DvrGraphicsContext::CancelBuffer");
-  ALOGI_IF(TRACE, "DvrGraphicsContext::CancelBuffer: fence_fd: %d", fence_fd);
-
-  DvrGraphicsContext* self = getSelf(window);
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  std::lock_guard<std::mutex> autolock(self->lock_);
-
-  android::dvr::NativeBufferProducer* native_buffer =
-      static_cast<android::dvr::NativeBufferProducer*>(buffer);
-  ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
-  ATRACE_INT("CancelBuffer", native_buffer->buffer()->id());
-  bool do_enqueue = true;
-  if (self->buffer_already_posted) {
-    // Check that the buffer is the one we expect, but handle it if this happens
-    // in production by returning this buffer to the buffer queue.
-    LOG_FATAL_IF(native_buffer != self->current_buffer);
-    if (native_buffer == self->current_buffer) {
-      do_enqueue = false;
-    }
-  }
-  if (do_enqueue) {
-    self->buffer_queue->Enqueue(native_buffer);
-  }
-  if (fence_fd >= 0)
-    close(fence_fd);
-  self->buffer_already_posted = false;
-  self->current_buffer = nullptr;
-
-  return android::NO_ERROR;
-}
-
-int DvrGraphicsContext::Query(const ANativeWindow* window, int what,
-                              int* value) {
-  DvrGraphicsContext* self = getSelf(const_cast<ANativeWindow*>(window));
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  std::lock_guard<std::mutex> autolock(self->lock_);
-
-  switch (what) {
-    case NATIVE_WINDOW_WIDTH:
-      *value = self->width;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_HEIGHT:
-      *value = self->height;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_FORMAT:
-      *value = self->format;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
-      *value = 1;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_CONCRETE_TYPE:
-      *value = NATIVE_WINDOW_SURFACE;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER:
-      *value = 1;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_DEFAULT_WIDTH:
-      *value = self->width;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_DEFAULT_HEIGHT:
-      *value = self->height;
-      return android::NO_ERROR;
-    case NATIVE_WINDOW_TRANSFORM_HINT:
-      *value = 0;
-      return android::NO_ERROR;
-  }
-
-  *value = 0;
-  return android::BAD_VALUE;
-}
-
-int DvrGraphicsContext::Perform(ANativeWindow* window, int operation, ...) {
-  DvrGraphicsContext* self = getSelf(window);
-  LOG_ALWAYS_FATAL_IF(self->graphics_api != DVR_GRAPHICS_API_VULKAN);
-  std::lock_guard<std::mutex> autolock(self->lock_);
-
-  va_list args;
-  va_start(args, operation);
-
-  // TODO(eieio): The following operations are not used at this time. They are
-  // included here to help document which operations may be useful and what
-  // parameters they take.
-  switch (operation) {
-    case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: {
-      int w = va_arg(args, int);
-      int h = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: w=%d h=%d", w, h);
-      return android::NO_ERROR;
-    }
-
-    case NATIVE_WINDOW_SET_BUFFERS_FORMAT: {
-      int format = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_FORMAT: format=%d", format);
-      return android::NO_ERROR;
-    }
-
-    case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: {
-      int transform = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: transform=%d",
-               transform);
-      return android::NO_ERROR;
-    }
-
-    case NATIVE_WINDOW_SET_USAGE: {
-      int usage = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_USAGE: usage=%d", usage);
-      return android::NO_ERROR;
-    }
-
-    case NATIVE_WINDOW_CONNECT:
-    case NATIVE_WINDOW_DISCONNECT:
-    case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
-    case NATIVE_WINDOW_API_CONNECT:
-    case NATIVE_WINDOW_API_DISCONNECT:
-      // TODO(eieio): we should implement these
-      return android::NO_ERROR;
-
-    case NATIVE_WINDOW_SET_BUFFER_COUNT: {
-      int buffer_count = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFER_COUNT: bufferCount=%d",
-               buffer_count);
-      return android::NO_ERROR;
-    }
-    case NATIVE_WINDOW_SET_BUFFERS_DATASPACE: {
-      android_dataspace_t data_space =
-          static_cast<android_dataspace_t>(va_arg(args, int));
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DATASPACE: dataSpace=%d",
-               data_space);
-      return android::NO_ERROR;
-    }
-    case NATIVE_WINDOW_SET_SCALING_MODE: {
-      int mode = va_arg(args, int);
-      ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_SCALING_MODE: mode=%d", mode);
-      return android::NO_ERROR;
-    }
-
-    case NATIVE_WINDOW_LOCK:
-    case NATIVE_WINDOW_UNLOCK_AND_POST:
-    case NATIVE_WINDOW_SET_CROP:
-    case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
-      return android::INVALID_OPERATION;
-  }
-
-  return android::NAME_NOT_FOUND;
-}
-
-int DvrGraphicsContext::DequeueBuffer_DEPRECATED(ANativeWindow* window,
-                                                 ANativeWindowBuffer** buffer) {
-  int fence_fd = -1;
-  int ret = DequeueBuffer(window, buffer, &fence_fd);
-
-  // The deprecated interface has no fence out-parameter; just close the fence.
-  if (ret == android::NO_ERROR && fence_fd != -1)
-    close(fence_fd);
-
-  return ret;
-}
-
-int DvrGraphicsContext::CancelBuffer_DEPRECATED(ANativeWindow* window,
-                                                ANativeWindowBuffer* buffer) {
-  return CancelBuffer(window, buffer, -1);
-}
-
-int DvrGraphicsContext::QueueBuffer_DEPRECATED(ANativeWindow* window,
-                                               ANativeWindowBuffer* buffer) {
-  return QueueBuffer(window, buffer, -1);
-}
-
-int DvrGraphicsContext::LockBuffer_DEPRECATED(ANativeWindow* /*window*/,
-                                              ANativeWindowBuffer* /*buffer*/) {
-  return android::NO_ERROR;
-}
-// End ANativeWindow implementation
-
-int dvrSetEdsPose(DvrGraphicsContext* graphics_context,
-                  float32x4_t render_pose_orientation,
-                  float32x4_t render_pose_translation) {
-  ATRACE_NAME("dvrSetEdsPose");
-  if (!graphics_context->current_buffer) {
-    ALOGE("dvrBeginRenderFrame must be called before dvrSetEdsPose");
-    return -EPERM;
-  }
-
-  // When late-latching is enabled, the pose buffer is written by the GPU, so
-  // we don't touch it here.
-  float32x4_t is_late_latch = DVR_POSE_LATE_LATCH;
-  if (render_pose_orientation[0] != is_late_latch[0]) {
-    volatile DisplaySurfaceMetadata* data = graphics_context->surface_metadata;
-    uint32_t buffer_index = 0;
-    ALOGE_IF(TRACE, "write pose index %d %f %f", buffer_index,
-             render_pose_orientation[0], render_pose_orientation[1]);
-    data->orientation[buffer_index] = render_pose_orientation;
-    data->translation[buffer_index] = render_pose_translation;
-  }
-
-  return 0;
-}
-
-int dvrBeginRenderFrameEds(DvrGraphicsContext* graphics_context,
-                           float32x4_t render_pose_orientation,
-                           float32x4_t render_pose_translation) {
-  ATRACE_NAME("dvrBeginRenderFrameEds");
-  LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
-  CHECK_GL();
-  // Grab a buffer from the queue and set its pose.
-  if (!graphics_context->current_buffer) {
-    graphics_context->current_buffer =
-        graphics_context->buffer_queue->Dequeue();
-  }
-
-  int ret = dvrSetEdsPose(graphics_context, render_pose_orientation,
-                          render_pose_translation);
-  if (ret < 0)
-    return ret;
-
-  ATRACE_ASYNC_BEGIN("BufferDraw",
-                     graphics_context->current_buffer->buffer()->id());
-
-  {
-    ATRACE_NAME("glEGLImageTargetTexture2DOES");
-    // Bind the texture to the latest buffer in the queue.
-    for (int i = 0; i < graphics_context->gl.texture_count; ++i) {
-      glBindTexture(graphics_context->gl.texture_target_type,
-                    graphics_context->gl.texture_id[i]);
-      glEGLImageTargetTexture2DOES(
-          graphics_context->gl.texture_target_type,
-          graphics_context->current_buffer->image_khr(i));
-    }
-    glBindTexture(graphics_context->gl.texture_target_type, 0);
-  }
-  CHECK_GL();
-  return 0;
-}
-
-int dvrBeginRenderFrameEdsVk(DvrGraphicsContext* graphics_context,
-                             float32x4_t render_pose_orientation,
-                             float32x4_t render_pose_translation,
-                             VkSemaphore acquire_semaphore,
-                             VkFence acquire_fence,
-                             uint32_t* swapchain_image_index,
-                             VkImageView* swapchain_image_view) {
-  ATRACE_NAME("dvrBeginRenderFrameEdsVk");
-  LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api !=
-                      DVR_GRAPHICS_API_VULKAN);
-
-  // Acquire a swapchain image. This calls Dequeue() internally.
-  VkResult result = vkAcquireNextImageKHR(
-      graphics_context->vk.device, graphics_context->vk.swapchain, UINT64_MAX,
-      acquire_semaphore, acquire_fence, swapchain_image_index);
-  if (result != VK_SUCCESS)
-    return -EINVAL;
-
-  // Set the EDS pose.
-  int ret = dvrSetEdsPose(graphics_context, render_pose_orientation,
-                          render_pose_translation);
-  if (ret < 0)
-    return ret;
-  *swapchain_image_view =
-      graphics_context->vk.swapchain_image_views[*swapchain_image_index];
-  return 0;
-}
-
-int dvrBeginRenderFrame(DvrGraphicsContext* graphics_context) {
-  return dvrBeginRenderFrameEds(graphics_context, DVR_POSE_NO_EDS,
-                                DVR_POSE_NO_EDS);
-}
-int dvrBeginRenderFrameVk(DvrGraphicsContext* graphics_context,
-                          VkSemaphore acquire_semaphore, VkFence acquire_fence,
-                          uint32_t* swapchain_image_index,
-                          VkImageView* swapchain_image_view) {
-  return dvrBeginRenderFrameEdsVk(
-      graphics_context, DVR_POSE_NO_EDS, DVR_POSE_NO_EDS, acquire_semaphore,
-      acquire_fence, swapchain_image_index, swapchain_image_view);
-}
-
-int dvrBeginRenderFrameLateLatch(DvrGraphicsContext* graphics_context,
-                                 uint32_t /*flags*/,
-                                 uint32_t target_vsync_count, int num_views,
-                                 const float** projection_matrices,
-                                 const float** eye_from_head_matrices,
-                                 const float** pose_offset_matrices,
-                                 uint32_t* out_late_latch_buffer_id) {
-  if (!graphics_context->late_latch) {
-    return -EPERM;
-  }
-  if (num_views > DVR_GRAPHICS_SURFACE_MAX_VIEWS) {
-    ALOGE("dvrBeginRenderFrameLateLatch called with too many views.");
-    return -EINVAL;
-  }
-  dvrBeginRenderFrameEds(graphics_context, DVR_POSE_LATE_LATCH,
-                         DVR_POSE_LATE_LATCH);
-  auto& ll = graphics_context->late_latch;
-  // TODO(jbates) Need to change this shader so that it dumps the single
-  // captured pose for both eyes into the display surface metadata buffer at
-  // the right index.
-  android::dvr::LateLatchInput input;
-  memset(&input, 0, sizeof(input));
-  for (int i = 0; i < num_views; ++i) {
-    memcpy(input.proj_mat + i, *(projection_matrices + i), 16 * sizeof(float));
-    memcpy(input.eye_from_head_mat + i, *(eye_from_head_matrices + i),
-           16 * sizeof(float));
-    memcpy(input.pose_offset + i, *(pose_offset_matrices + i),
-           16 * sizeof(float));
-  }
-  input.pose_index =
-      target_vsync_count & android::dvr::kPoseAsyncBufferIndexMask;
-  input.render_pose_index =
-      graphics_context->current_buffer->surface_buffer_index();
-  ll->AddLateLatch(input);
-  *out_late_latch_buffer_id = ll->output_buffer_id();
-  return 0;
-}
-
-extern "C" int dvrGraphicsWaitNextFrame(
-    DvrGraphicsContext* graphics_context, int64_t start_delay_ns,
-    DvrFrameSchedule* out_next_frame_schedule) {
-  start_delay_ns = std::max(start_delay_ns, static_cast<int64_t>(0));
-
-  // We only do one-shot timers:
-  int64_t wake_time_ns = 0;
-
-  uint32_t current_frame_vsync;
-  int64_t current_frame_scheduled_finish_ns;
-  int64_t vsync_period_ns;
-
-  int fetch_schedule_result = graphics_context->vsync_client->GetSchedInfo(
-      &vsync_period_ns, &current_frame_scheduled_finish_ns,
-      &current_frame_vsync);
-  if (fetch_schedule_result == 0) {
-    wake_time_ns = current_frame_scheduled_finish_ns + start_delay_ns;
-    // If the last wakeup time is still in the future, use it instead to avoid
-    // major schedule jumps when applications call WaitNextFrame with
-    // aggressive offsets.
-    int64_t now = android::dvr::GetSystemClockNs();
-    if (android::dvr::TimestampGT(wake_time_ns - vsync_period_ns, now)) {
-      wake_time_ns -= vsync_period_ns;
-      --current_frame_vsync;
-    }
-    // If the next wakeup time is in the past, add a vsync period to keep the
-    // application on schedule.
-    if (android::dvr::TimestampLT(wake_time_ns, now)) {
-      wake_time_ns += vsync_period_ns;
-      ++current_frame_vsync;
-    }
-  } else {
-    ALOGE("Error getting frame schedule because: %s",
-          strerror(-fetch_schedule_result));
-    // Sleep for a vsync period to avoid cascading failure.
-    wake_time_ns = android::dvr::GetSystemClockNs() +
-                   graphics_context->display_metrics.vsync_period_ns;
-  }
-
-  // Adjust nsec to [0..999,999,999].
-  struct itimerspec wake_time;
-  wake_time.it_interval.tv_sec = 0;
-  wake_time.it_interval.tv_nsec = 0;
-  wake_time.it_value = android::dvr::NsToTimespec(wake_time_ns);
-  bool sleep_result =
-      timerfd_settime(graphics_context->timerfd.Get(), TFD_TIMER_ABSTIME,
-                      &wake_time, nullptr) == 0;
-  if (sleep_result) {
-    ATRACE_NAME("sleep");
-    uint64_t expirations = 0;
-    sleep_result = read(graphics_context->timerfd.Get(), &expirations,
-                        sizeof(uint64_t)) == sizeof(uint64_t);
-    if (!sleep_result) {
-      ALOGE("Error: timerfd read failed");
-    }
-  } else {
-    ALOGE("Error: timerfd_settime failed because: %s", strerror(errno));
-  }
-
-  auto& frame_history = graphics_context->frame_history;
-  frame_history.CheckForFinishedFrames();
-  if (fetch_schedule_result == 0) {
-    uint32_t next_frame_vsync =
-        current_frame_vsync +
-        frame_history.PredictNextFrameVsyncInterval(vsync_period_ns);
-    int64_t next_frame_scheduled_finish =
-        (wake_time_ns - start_delay_ns) + vsync_period_ns;
-    frame_history.OnFrameStart(next_frame_vsync, next_frame_scheduled_finish);
-    if (out_next_frame_schedule) {
-      out_next_frame_schedule->vsync_count = next_frame_vsync;
-      out_next_frame_schedule->scheduled_frame_finish_ns =
-          next_frame_scheduled_finish;
-    }
-  } else {
-    frame_history.OnFrameStart(UINT32_MAX, -1);
-  }
-
-  return (fetch_schedule_result == 0 && sleep_result) ? 0 : -1;
-}
-
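Note on the wait logic above: it arms a one-shot absolute timerfd deadline rather than doing a relative sleep, so the wakeup stays anchored to the vsync-derived schedule even if the thread is preempted between computing the target time and arming the timer. A minimal standalone sketch of that pattern follows; SleepUntilNs is a hypothetical helper, and CLOCK_MONOTONIC is assumed to match the clock used by GetSystemClockNs().

    #include <sys/timerfd.h>
    #include <time.h>
    #include <unistd.h>

    #include <cstdint>

    // Sketch: block until |wake_time_ns| (CLOCK_MONOTONIC, absolute) using a
    // one-shot timerfd, mirroring the structure of dvrGraphicsWaitNextFrame.
    static bool SleepUntilNs(int64_t wake_time_ns) {
      const int fd = timerfd_create(CLOCK_MONOTONIC, 0);
      if (fd < 0)
        return false;
      itimerspec wake_time = {};
      wake_time.it_value.tv_sec = wake_time_ns / 1000000000LL;
      wake_time.it_value.tv_nsec = wake_time_ns % 1000000000LL;
      bool ok = timerfd_settime(fd, TFD_TIMER_ABSTIME, &wake_time, nullptr) == 0;
      if (ok) {
        uint64_t expirations = 0;
        ok = read(fd, &expirations, sizeof(expirations)) == sizeof(expirations);
      }
      close(fd);
      return ok;
    }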
-extern "C" void dvrGraphicsPostEarly(DvrGraphicsContext* graphics_context) {
-  ATRACE_NAME("dvrGraphicsPostEarly");
-  ALOGI_IF(TRACE, "dvrGraphicsPostEarly");
-
-  LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
-
-  // Note that this function can be called before or after
-  // dvrBeginRenderFrame.
-  if (!graphics_context->buffer_already_posted) {
-    graphics_context->buffer_already_posted = true;
-
-    if (!graphics_context->current_buffer) {
-      graphics_context->current_buffer =
-          graphics_context->buffer_queue->Dequeue();
-    }
-
-    auto buffer = graphics_context->current_buffer->buffer().get();
-    ATRACE_ASYNC_BEGIN("BufferPost", buffer->id());
-    int result = buffer->Post<void>(LocalHandle());
-    if (result < 0)
-      ALOGE("Buffer post failed: %d (%s)", result, strerror(-result));
-  }
-}
-
-int dvrPresent(DvrGraphicsContext* graphics_context) {
-  LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api != DVR_GRAPHICS_API_GLES);
-
-  std::array<char, 128> buf;
-  snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
-           graphics_context->frame_history.GetCurrentFrameVsync());
-  ATRACE_NAME(buf.data());
-
-  if (!graphics_context->current_buffer) {
-    ALOGE("Error: dvrPresent called without dvrBeginRenderFrame");
-    return -EPERM;
-  }
-
-  LocalHandle fence_fd =
-      android::dvr::CreateGLSyncAndFlush(graphics_context->gl.egl_display);
-
-  ALOGI_IF(TRACE, "PostBuffer: buffer_id=%d, fence_fd=%d",
-           graphics_context->current_buffer->buffer()->id(), fence_fd.Get());
-  ALOGW_IF(!graphics_context->display_surface->visible(),
-           "PostBuffer: Posting buffer on invisible surface!!!");
-
-  auto buffer = graphics_context->current_buffer->buffer().get();
-  ATRACE_ASYNC_END("BufferDraw", buffer->id());
-  if (!graphics_context->buffer_already_posted) {
-    ATRACE_ASYNC_BEGIN("BufferPost", buffer->id());
-    int result = buffer->Post<void>(fence_fd);
-    if (result < 0)
-      ALOGE("Buffer post failed: %d (%s)", result, strerror(-result));
-  }
-
-  graphics_context->frame_history.OnFrameSubmit(std::move(fence_fd));
-  graphics_context->buffer_already_posted = false;
-  graphics_context->current_buffer = nullptr;
-  return 0;
-}
-
-int dvrPresentVk(DvrGraphicsContext* graphics_context,
-                 VkSemaphore submit_semaphore, uint32_t swapchain_image_index) {
-  LOG_ALWAYS_FATAL_IF(graphics_context->graphics_api !=
-                      DVR_GRAPHICS_API_VULKAN);
-
-  std::array<char, 128> buf;
-  snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
-           graphics_context->frame_history.GetCurrentFrameVsync());
-  ATRACE_NAME(buf.data());
-
-  if (!graphics_context->current_buffer) {
-    ALOGE("Error: dvrPresentVk called without dvrBeginRenderFrameVk");
-    return -EPERM;
-  }
-
-  // Present the specified image. Internally, this gets a fence from the
-  // Vulkan driver and passes it to DvrGraphicsContext::Post(),
-  // which in turn passes it to buffer->Post() and adds it to frame_history.
-  VkPresentInfoKHR present_info = {};
-  present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
-  present_info.swapchainCount = 1;
-  present_info.pSwapchains = &graphics_context->vk.swapchain;
-  present_info.pImageIndices = &swapchain_image_index;
-  present_info.waitSemaphoreCount =
-      (submit_semaphore != VK_NULL_HANDLE) ? 1 : 0;
-  present_info.pWaitSemaphores = &submit_semaphore;
-  VkResult result =
-      vkQueuePresentKHR(graphics_context->vk.present_queue, &present_info);
-  if (result != VK_SUCCESS) {
-    return -EINVAL;
-  }
-
-  return 0;
-}
-
-extern "C" int dvrGetFrameScheduleResults(DvrGraphicsContext* context,
-                                          DvrFrameScheduleResult* results,
-                                          int in_result_count) {
-  if (!context || !results)
-    return -EINVAL;
-
-  return context->frame_history.GetPreviousFrameResults(results,
-                                                        in_result_count);
-}
-
-extern "C" void dvrGraphicsSurfaceSetVisible(
-    DvrGraphicsContext* graphics_context, int visible) {
-  graphics_context->display_surface->SetVisible(visible);
-}
-
-extern "C" int dvrGraphicsSurfaceGetVisible(
-    DvrGraphicsContext* graphics_context) {
-  return !!graphics_context->display_surface->visible();
-}
-
-extern "C" void dvrGraphicsSurfaceSetZOrder(
-    DvrGraphicsContext* graphics_context, int z_order) {
-  graphics_context->display_surface->SetZOrder(z_order);
-}
-
-extern "C" int dvrGraphicsSurfaceGetZOrder(
-    DvrGraphicsContext* graphics_context) {
-  return graphics_context->display_surface->z_order();
-}
diff --git a/libs/vr/libdisplay/include/dvr/graphics.h b/libs/vr/libdisplay/include/dvr/graphics.h
deleted file mode 100644
index c83a698..0000000
--- a/libs/vr/libdisplay/include/dvr/graphics.h
+++ /dev/null
@@ -1,450 +0,0 @@
-#ifndef DVR_GRAPHICS_H_
-#define DVR_GRAPHICS_H_
-
-#include <EGL/egl.h>
-#include <sys/cdefs.h>
-
-#ifdef __ARM_NEON
-#include <arm_neon.h>
-#else
-#ifndef __FLOAT32X4T_86
-#define __FLOAT32X4T_86
-typedef float float32x4_t __attribute__((__vector_size__(16)));
-typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t;
-#endif
-#endif
-
-#ifndef VK_USE_PLATFORM_ANDROID_KHR
-#define VK_USE_PLATFORM_ANDROID_KHR 1
-#endif
-#include <vulkan/vulkan.h>
-
-__BEGIN_DECLS
-
-// Display surface parameters used to specify display surface options.
-enum {
-  DVR_SURFACE_PARAMETER_NONE = 0,
-  // WIDTH
-  DVR_SURFACE_PARAMETER_WIDTH_IN,
-  // HEIGHT
-  DVR_SURFACE_PARAMETER_HEIGHT_IN,
-  // DISABLE_DISTORTION
-  DVR_SURFACE_PARAMETER_DISABLE_DISTORTION_IN,
-  // DISABLE_STABILIZATION
-  DVR_SURFACE_PARAMETER_DISABLE_STABILIZATION_IN,
-  // Disable chromatic aberration correction
-  DVR_SURFACE_PARAMETER_DISABLE_CAC_IN,
-  // ENABLE_LATE_LATCH: Enable late latching of pose data for application
-  // GPU shaders.
-  DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN,
-  // VISIBLE
-  DVR_SURFACE_PARAMETER_VISIBLE_IN,
-  // Z_ORDER
-  DVR_SURFACE_PARAMETER_Z_ORDER_IN,
-  // EXCLUDE_FROM_BLUR
-  DVR_SURFACE_PARAMETER_EXCLUDE_FROM_BLUR_IN,
-  // BLUR_BEHIND
-  DVR_SURFACE_PARAMETER_BLUR_BEHIND_IN,
-  // DISPLAY_WIDTH
-  DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT,
-  // DISPLAY_HEIGHT
-  DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT,
-  // SURFACE_WIDTH: Returns width of allocated surface buffer.
-  DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT,
-  // SURFACE_HEIGHT: Returns height of allocated surface buffer.
-  DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT,
-  // INTER_LENS_METERS: Returns float value in meters, the distance between
-  // lenses.
-  DVR_SURFACE_PARAMETER_INTER_LENS_METERS_OUT,
-  // LEFT_FOV_LRBT: Return storage must have room for array of 4 floats (in
-  // radians). The layout is left, right, bottom, top as indicated by LRBT.
-  DVR_SURFACE_PARAMETER_LEFT_FOV_LRBT_OUT,
-  // RIGHT_FOV_LRBT: Return storage must have room for array of 4 floats (in
-  // radians). The layout is left, right, bottom, top as indicated by LRBT.
-  DVR_SURFACE_PARAMETER_RIGHT_FOV_LRBT_OUT,
-  // VSYNC_PERIOD: Returns the period of the display refresh (in
-  // nanoseconds per refresh), as a 64-bit unsigned integer.
-  DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT,
-  // SURFACE_TEXTURE_TARGET_TYPE: Returns the type of texture used as the render
-  // target.
-  DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT,
-  // SURFACE_TEXTURE_TARGET_ID: Returns the texture ID used as the render
-  // target.
-  DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT,
-  // Whether the surface needs to be flipped vertically before display. Default
-  // is 0.
-  DVR_SURFACE_PARAMETER_VERTICAL_FLIP_IN,
-  // A bool indicating whether or not to create a GL context for the surface.
-  // 0: don't create a context
-  // Non-zero: create a context.
-  // Default is 1.
-  // If this value is 0, there must be a GLES 3.2 or greater context bound on
-  // the current thread at the time dvrGraphicsContextCreate is called.
-  DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN,
-  // Specify one of DVR_SURFACE_GEOMETRY_*.
-  DVR_SURFACE_PARAMETER_GEOMETRY_IN,
-  // FORMAT: One of DVR_SURFACE_FORMAT_RGBA_8888 or DVR_SURFACE_FORMAT_RGB_565.
-  // Default is DVR_SURFACE_FORMAT_RGBA_8888.
-  DVR_SURFACE_PARAMETER_FORMAT_IN,
-  // GRAPHICS_API: One of DVR_SURFACE_GRAPHICS_API_GLES or
-  // DVR_SURFACE_GRAPHICS_API_VULKAN. Default is GLES.
-  DVR_SURFACE_PARAMETER_GRAPHICS_API_IN,
-  // VK_INSTANCE: In Vulkan mode, the application creates a VkInstance and
-  // passes it in.
-  DVR_SURFACE_PARAMETER_VK_INSTANCE_IN,
-  // VK_PHYSICAL_DEVICE: In Vulkan mode, the application passes in the
-  // PhysicalDevice handle corresponding to the logical device passed to
-  // VK_DEVICE.
-  DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN,
-  // VK_DEVICE: In Vulkan mode, the application creates a VkDevice and
-  // passes it in.
-  DVR_SURFACE_PARAMETER_VK_DEVICE_IN,
-  // VK_PRESENT_QUEUE: In Vulkan mode, the application selects a
-  // presentation-compatible VkQueue and passes it in.
-  DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN,
-  // VK_PRESENT_QUEUE_FAMILY: In Vulkan mode, the application passes in the
-  // index of the queue family containing the VkQueue passed to
-  // VK_PRESENT_QUEUE.
-  DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN,
-  // VK_SWAPCHAIN_IMAGE_COUNT: In Vulkan mode, the number of swapchain images
-  // will be returned here.
-  DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT,
-  // VK_SWAPCHAIN_IMAGE_FORMAT: In Vulkan mode, the VkFormat of the swapchain
-  // images will be returned here.
-  DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT,
-  // DIRECT: Whether the surface goes directly to the display or to the
-  // compositor. Default is 0 (compositor). Only processes with either uid=root
-  // (test tools) or uid validated by IsTrustedUid() may set this to 1.
-  DVR_SURFACE_PARAMETER_DIRECT_IN,
-};
-
-enum {
-  // Default surface type. One wide buffer with the left eye view in the left
-  // half and the right eye view in the right half.
-  DVR_SURFACE_GEOMETRY_SINGLE,
-  // Separate buffers, one per eye. The width parameters still refer to the
-  // total width (2 * eye view width).
-  DVR_SURFACE_GEOMETRY_SEPARATE_2,
-};
-
-// Surface format. Gvr only supports RGBA_8888 and RGB_565 for now, so those are
-// the only formats we provide here.
-enum {
-  DVR_SURFACE_FORMAT_RGBA_8888,
-  DVR_SURFACE_FORMAT_RGB_565,
-};
-
-enum {
-  // Graphics contexts are created for OpenGL ES client applications by default.
-  DVR_GRAPHICS_API_GLES,
-  // Create the graphics context for Vulkan client applications.
-  DVR_GRAPHICS_API_VULKAN,
-};
-
-#define DVR_SURFACE_PARAMETER_IN(name, value) \
-  { DVR_SURFACE_PARAMETER_##name##_IN, (value), NULL }
-#define DVR_SURFACE_PARAMETER_OUT(name, value) \
-  { DVR_SURFACE_PARAMETER_##name##_OUT, 0, (value) }
-#define DVR_SURFACE_PARAMETER_LIST_END \
-  { DVR_SURFACE_PARAMETER_NONE, 0, NULL }
-
-struct DvrSurfaceParameter {
-  int32_t key;
-  int64_t value;
-  void* value_out;
-};
-
-// This is a convenience struct to hold the relevant information of the HMD
-// lenses.
-struct DvrLensInfo {
-  float inter_lens_meters;
-  float left_fov[4];
-  float right_fov[4];
-};
-
-int dvrGetNativeDisplayDimensions(int* native_width, int* native_height);
-
-typedef struct DvrReadBuffer DvrReadBuffer;
-
-// Opaque struct that represents a graphics context, the texture swap chain,
-// and surfaces.
-typedef struct DvrGraphicsContext DvrGraphicsContext;
-
-// Create the graphics context with the given parameters. The list of
-// parameters is terminated with an entry where key ==
-// DVR_SURFACE_PARAMETER_NONE. For example, the parameters array could be built
-// as follows:
-//   int display_width = 0, display_height = 0;
-//   int surface_width = 0, surface_height = 0;
-//   float inter_lens_meters = 0.0f;
-//   float left_fov[4] = {0.0f};
-//   float right_fov[4] = {0.0f};
-//   int disable_warp = 0;
-//   DvrSurfaceParameter surface_params[] = {
-//       DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
-//       DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
-//       DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
-//       DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
-//       DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
-//       DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
-//       DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
-//       DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
-//       DVR_SURFACE_PARAMETER_LIST_END,
-//   };
-int dvrGraphicsContextCreate(struct DvrSurfaceParameter* parameters,
-                             DvrGraphicsContext** return_graphics_context);
-
-// Destroy the graphics context.
-void dvrGraphicsContextDestroy(DvrGraphicsContext* graphics_context);
-
-// For every frame a schedule is decided by the system compositor. A sample
-// schedule for two frames is shown below.
-//
-// |                        |                        |
-// |-----------------|------|-----------------|------|
-// |                        |                        |
-// V0                A1     V1                A2     V2
-//
-// V0, V1, and V2 are display vsync events. Vsync events are uniquely identified
-// throughout the DVR system by a vsync count maintained by the system
-// compositor.
-//
-// A1 and A2 indicate when the application should finish rendering its frame,
-// including all GPU work. Under normal circumstances the scheduled finish
-// time will be set a few milliseconds before the vsync time, to give the
-// compositor time to perform distortion and EDS on the app's buffer. For apps
-// that don't use system distortion the scheduled frame finish time will be
-// closer to the vsync time. Other factors can also affect the scheduled frame
-// finish time, e.g. whether or not the System UI is being displayed.
-typedef struct DvrFrameSchedule {
-  // vsync_count is used as a frame identifier.
-  uint32_t vsync_count;
-
-  // The time when the app should finish rendering its frame, including all GPU
-  // work.
-  int64_t scheduled_frame_finish_ns;
-} DvrFrameSchedule;
-
-// Sleep until it's time to render the next frame. This should be the first
-// function called as part of an app's render loop, which normally looks like
-// this:
-//
-// while (1) {
-//   DvrFrameSchedule schedule;
-//   dvrGraphicsWaitNextFrame(..., &schedule); // Sleep until it's time to
-//                                             // render the next frame
-//   pose = dvrPoseGet(schedule.vsync_count);
-//   dvrBeginRenderFrame(...);
-//   <render a frame using the pose>
-//   dvrPresent(...); // Post the buffer
-// }
-//
-// |start_delay_ns| adjusts how long this function blocks the app from starting
-// its next frame. If |start_delay_ns| is 0, the function waits until the
-// scheduled frame finish time for the current frame, which gives the app one
-// full vsync period to render the next frame. If the app needs less than a full
-// vysnc period to render the frame, pass in a non-zero |start_delay_ns| to
-// delay the start of frame rendering further. For example, if the vsync period
-// is 11.1ms and the app takes 6ms to render a frame, consider setting this to
-// 5ms (note that the value is in nanoseconds, so 5,000,000ns) so that the app
-// finishes the frame closer to the scheduled frame finish time. Delaying the
-// start of rendering allows the app to use a more up-to-date pose for
-// rendering.
-// |start_delay_ns| must be a positive value or 0. If you're unsure what to set
-// for |start_delay_ns|, use 0.
-//
-// |out_next_frame_schedule| is an output parameter that will contain the
-// schedule for the next frame. It can be null. This function returns a negative
-// error code on failure.
-int dvrGraphicsWaitNextFrame(DvrGraphicsContext* graphics_context,
-                             int64_t start_delay_ns,
-                             DvrFrameSchedule* out_next_frame_schedule);
-
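A compilable variant of the loop sketched in the comment above, showing where |start_delay_ns| and the return value fit in. RenderWithPose is a placeholder for the application's own draw code (which would typically call dvrPoseGet with the returned vsync count), and the 5 ms start delay is only an example value.

    #include <stdint.h>

    #include <dvr/graphics.h>

    void RenderWithPose(uint32_t vsync_count);  // app-specific placeholder

    // Sketch of a render loop built on this API.
    void RenderLoop(DvrGraphicsContext* context) {
      constexpr int64_t kStartDelayNs = 5000000;  // 5 ms, example only
      while (true) {
        DvrFrameSchedule schedule;
        if (dvrGraphicsWaitNextFrame(context, kStartDelayNs, &schedule) < 0)
          continue;  // Scheduling info was unavailable; try again next frame.
        if (dvrBeginRenderFrame(context) < 0)
          continue;
        RenderWithPose(schedule.vsync_count);
        dvrPresent(context);
      }
    }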
-// Prepares the graphics context's texture for rendering.  This function should
-// be called once for each frame, ideally immediately before the first GL call
-// on the framebuffer which wraps the surface texture.
-//
-// For GL contexts, GL states are modified as follows by this function:
-// glBindTexture(GL_TEXTURE_2D, 0);
-//
-// @param[in] graphics_context The DvrGraphicsContext.
-// @param[in] render_pose_orientation Head pose orientation that rendering for
-//            this frame will be based off of. This must be an unmodified value
-//            from DvrPoseAsync, returned by dvrPoseGet.
-// @param[in] render_pose_translation Head pose translation that rendering for
-//            this frame will be based off of. This must be an unmodified value
-//            from DvrPoseAsync, returned by dvrPoseGet.
-// @return 0 on success or a negative error code on failure.
-// Check GL errors with glGetError for other error conditions.
-int dvrBeginRenderFrameEds(DvrGraphicsContext* graphics_context,
-                           float32x4_t render_pose_orientation,
-                           float32x4_t render_pose_translation);
-int dvrBeginRenderFrameEdsVk(DvrGraphicsContext* graphics_context,
-                             float32x4_t render_pose_orientation,
-                             float32x4_t render_pose_translation,
-                             VkSemaphore acquire_semaphore,
-                             VkFence acquire_fence,
-                             uint32_t* swapchain_image_index,
-                             VkImageView* swapchain_image_view);
-// Same as dvrBeginRenderFrameEds, but with no EDS (asynchronous reprojection).
-//
-// For GL contexts, GL states are modified as follows by this function:
-// glBindTexture(GL_TEXTURE_2D, 0);
-//
-// @param[in] graphics_context The DvrGraphicsContext.
-// @return 0 on success or a negative error code on failure.
-// Check GL errors with glGetError for other error conditions.
-int dvrBeginRenderFrame(DvrGraphicsContext* graphics_context);
-int dvrBeginRenderFrameVk(DvrGraphicsContext* graphics_context,
-                          VkSemaphore acquire_semaphore, VkFence acquire_fence,
-                          uint32_t* swapchain_image_index,
-                          VkImageView* swapchain_image_view);
-
-// Maximum number of views per surface buffer (for multiview, multi-eye, etc).
-#define DVR_GRAPHICS_SURFACE_MAX_VIEWS 4
-
-// Output data format of late latch shader. The application can bind all or part
-// of this data with the buffer ID returned by dvrBeginRenderFrameLateLatch.
-// This struct is compatible with std140 layout for use from shaders.
-struct __attribute__((__packed__)) DvrGraphicsLateLatchData {
-  // Column-major order.
-  float view_proj_matrix[DVR_GRAPHICS_SURFACE_MAX_VIEWS][16];
-  // Column-major order.
-  float view_matrix[DVR_GRAPHICS_SURFACE_MAX_VIEWS][16];
-  // Quaternion for pose orientation from start space.
-  float pose_orientation[4];
-  // Pose translation from start space.
-  float pose_translation[4];
-};
-
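As a hedged sanity check on the claim that this packed struct lines up with an std140 block, the expected sizes can be verified at compile time: with the 4-view maximum, each mat4 occupies 64 bytes and each vec4 16 bytes. The figures below are derived from the layout above, not from a separate specification.

    #include <cstddef>

    // Sketch: compile-time layout check for DvrGraphicsLateLatchData with
    // DVR_GRAPHICS_SURFACE_MAX_VIEWS == 4 (2 * 4 mat4 + 2 vec4 = 544 bytes).
    static_assert(sizeof(DvrGraphicsLateLatchData) ==
                      DVR_GRAPHICS_SURFACE_MAX_VIEWS * 2 * 16 * sizeof(float) +
                          2 * 4 * sizeof(float),
                  "unexpected DvrGraphicsLateLatchData size");
    static_assert(offsetof(DvrGraphicsLateLatchData, pose_orientation) ==
                      DVR_GRAPHICS_SURFACE_MAX_VIEWS * 2 * 16 * sizeof(float),
                  "unexpected pose_orientation offset");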
-// Begin render frame with late latching of pose data. This kicks off a compute
-// shader that will read the latest head pose and then compute and output
-// matrices that can be used by application shaders.
-//
-// Matrices are computed with the following pseudo code.
-//   Pose pose = getLateLatchPose();
-//   out.pose_orientation = pose.orientation;
-//   out.pose_translation = pose.translation;
-//   mat4 head_from_center = ComputeInverseMatrix(pose);
-//   for each view:
-//     out.viewMatrix[view] =
-//         eye_from_head_matrices[view] * head_from_center *
-//         pose_offset_matrices[view];
-//     out.viewProjMatrix[view] =
-//         projection_matrices[view] * out.viewMatrix[view];
-//
-// For GL contexts, GL states are modified as follows by this function:
-// glBindTexture(GL_TEXTURE_2D, 0);
-// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, 0);
-// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, 0);
-// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, 0);
-// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, 0);
-// glUseProgram(0);
-//
-// @param[in] graphics_context The DvrGraphicsContext.
-// @param[in] flags Specify 0.
-// @param[in] target_vsync_count The target vsync count that this frame will
-//            display at. This is used for pose prediction.
-// @param[in] num_views Number of matrices in each of the following matrix array
-//            parameters. Typically 2 for left and right eye views. Maximum is
-//            DVR_GRAPHICS_SURFACE_MAX_VIEWS.
-// @param[in] projection_matrices Array of pointers to |num_views| matrices with
-//            column-major layout. These are the application projection
-//            matrices.
-// @param[in] eye_from_head_matrices Array of pointers to |num_views| matrices
-//            with column-major layout. See pseudo code for how these are used.
-// @param[in] pose_offset_matrices Array of pointers to |num_views| matrices
-//            with column-major layout. See pseudo code for how these are used.
-// @param[out] out_late_latch_buffer_id The GL buffer ID of the output buffer
-//             of type DvrGraphicsLateLatchData.
-// @return 0 on success or a negative error code on failure.
-// Check GL errors with glGetError for other error conditions.
-int dvrBeginRenderFrameLateLatch(DvrGraphicsContext* graphics_context,
-                                 uint32_t flags, uint32_t target_vsync_count,
-                                 int num_views,
-                                 const float** projection_matrices,
-                                 const float** eye_from_head_matrices,
-                                 const float** pose_offset_matrices,
-                                 uint32_t* out_late_latch_buffer_id);
-
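The pseudo code above boils down to column-major 4x4 multiplies. A hedged CPU-side sketch of the same per-view computation is shown below; Mat4, MulColumnMajor, and ComputeViewMatrices are illustrative helpers (not part of this API), head_from_center is assumed to already be the inverse of the latched pose, and on device this work actually happens in the late-latch compute shader.

    #include <array>

    // Illustrative column-major 4x4 matrix and multiply (result = a * b).
    using Mat4 = std::array<float, 16>;

    static Mat4 MulColumnMajor(const Mat4& a, const Mat4& b) {
      Mat4 r{};
      for (int col = 0; col < 4; ++col)
        for (int row = 0; row < 4; ++row)
          for (int k = 0; k < 4; ++k)
            r[col * 4 + row] += a[k * 4 + row] * b[col * 4 + k];
      return r;
    }

    // Mirrors the per-view math from the comment above.
    static void ComputeViewMatrices(int num_views, const Mat4& head_from_center,
                                    const Mat4* projection_matrices,
                                    const Mat4* eye_from_head_matrices,
                                    const Mat4* pose_offset_matrices,
                                    Mat4* out_view, Mat4* out_view_proj) {
      for (int i = 0; i < num_views; ++i) {
        out_view[i] = MulColumnMajor(
            MulColumnMajor(eye_from_head_matrices[i], head_from_center),
            pose_offset_matrices[i]);
        out_view_proj[i] = MulColumnMajor(projection_matrices[i], out_view[i]);
      }
    }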
-// Present a frame for display.
-// This call is normally non-blocking, unless the internal buffer queue is full.
-// @return 0 on success or a negative error code on failure.
-int dvrPresent(DvrGraphicsContext* graphics_context);
-int dvrPresentVk(DvrGraphicsContext* graphics_context,
-                 VkSemaphore submit_semaphore, uint32_t swapchain_image_index);
-
-// Post the next buffer early. This allows the application to race with either
-// the async EDS process or the scanline for applications that are not using
-// system distortion. When this is called, the next buffer in the queue is
-// posted for display. It is up to the application to kick its GPU rendering
-// work in time. If the rendering is incomplete there will be significant,
-// undesirable tearing artifacts.
-// It is not recommended to use this feature with system distortion.
-void dvrGraphicsPostEarly(DvrGraphicsContext* graphics_context);
-
-// Used to retrieve frame measurement timings from dvrGetFrameScheduleResults().
-typedef struct DvrFrameScheduleResult {
-  // vsync_count is used as a frame identifier.
-  uint32_t vsync_count;
-
-  // The app's scheduled frame finish time.
-  int64_t scheduled_frame_finish_ns;
-
-  // The difference (in nanoseconds) between the scheduled finish time and the
-  // actual finish time.
-  //
-  // A value of +2ms for frame_finish_offset_ns indicates the app's frame was
-  // late and may have been skipped by the compositor for that vsync. A value of
-  // -1ms indicates the app's frame finished just ahead of schedule, as
-  // desired. A value of -6ms indicates the app's frame finished well ahead of
-  // schedule for that vsync. In that case the app may have unnecessary visual
-  // latency. Consider using the start_delay_ns parameter in
-  // dvrGraphicsWaitNextFrame() to align the app's frame finish time closer to
-  // the scheduled finish time.
-  int64_t frame_finish_offset_ns;
-} DvrFrameScheduleResult;
-
-// Retrieve the latest frame schedule results for the app. To collect all the
-// results this should be called each frame. The results for each frame are
-// returned only once.
-// The number of results written to |results| is returned on success, or a
-// negative error code on failure.
-// |graphics_context| is the context to retrieve frame schedule results for.
-// |results| is an array that will contain the frame schedule results.
-// |result_count| is the size of the |results| array. It's recommended to pass
-// in an array with 2 elements to ensure results for all frames are collected.
-int dvrGetFrameScheduleResults(DvrGraphicsContext* graphics_context,
-                               DvrFrameScheduleResult* results,
-                               int result_count);
-
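A short usage sketch of the polling pattern described above; |context| is assumed to be a live DvrGraphicsContext, and any positive offset is simply treated as a late frame.

    #include <inttypes.h>

    #include <log/log.h>

    // Sketch: drain the per-frame schedule results once per frame and log any
    // frame that finished after its scheduled deadline.
    void LogLateFrames(DvrGraphicsContext* context) {
      DvrFrameScheduleResult results[2];
      const int count = dvrGetFrameScheduleResults(context, results, 2);
      for (int i = 0; i < count; ++i) {
        if (results[i].frame_finish_offset_ns > 0) {
          ALOGW("Frame for vsync %" PRIu32 " finished %" PRId64 " ns late",
                results[i].vsync_count, results[i].frame_finish_offset_ns);
        }
      }
    }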
-// Make the surface visible or hidden based on |visible|.
-// 0: hidden, Non-zero: visible.
-void dvrGraphicsSurfaceSetVisible(DvrGraphicsContext* graphics_context,
-                                  int visible);
-
-// Returns surface visibility last requested by the client.
-int dvrGraphicsSurfaceGetVisible(DvrGraphicsContext* graphics_context);
-
-// Returns surface z order last requested by the client.
-int dvrGraphicsSurfaceGetZOrder(DvrGraphicsContext* graphics_context);
-
-// Sets the compositor z-order of the surface. Higher values display on
-// top of lower values.
-void dvrGraphicsSurfaceSetZOrder(DvrGraphicsContext* graphics_context,
-                                 int z_order);
-
-typedef struct DvrVideoMeshSurface DvrVideoMeshSurface;
-
-DvrVideoMeshSurface* dvrGraphicsVideoMeshSurfaceCreate(
-    DvrGraphicsContext* graphics_context);
-void dvrGraphicsVideoMeshSurfaceDestroy(DvrVideoMeshSurface* surface);
-
-// Present a VideoMeshSurface with the current video mesh transformation matrix.
-void dvrGraphicsVideoMeshSurfacePresent(DvrGraphicsContext* graphics_context,
-                                        DvrVideoMeshSurface* surface,
-                                        const int eye, const float* transform);
-
-__END_DECLS
-
-#endif  // DVR_GRAPHICS_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h b/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h
deleted file mode 100644
index b9c1067..0000000
--- a/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
-#define ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
-
-#include <android/native_window.h>
-#include <ui/ANativeObjectBase.h>
-#include <system/window.h>
-
-namespace android {
-namespace dvr {
-
-// DummyNativeWindow is an implementation of ANativeWindow that is
-// essentially empty and is used as a surface placeholder during context
-// creation for contexts that we don't intend to call eglSwapBuffers on.
-class DummyNativeWindow
-    : public ANativeObjectBase<ANativeWindow, DummyNativeWindow,
-                               LightRefBase<DummyNativeWindow> > {
- public:
-  DummyNativeWindow();
-
- private:
-  static int Query(const ANativeWindow* window, int what, int* value);
-  static int Perform(ANativeWindow* window, int operation, ...);
-
-  DummyNativeWindow(const DummyNativeWindow&) = delete;
-  void operator=(DummyNativeWindow&) = delete;
-};
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/frame_history.h b/libs/vr/libdisplay/include/private/dvr/frame_history.h
deleted file mode 100644
index 53e0717..0000000
--- a/libs/vr/libdisplay/include/private/dvr/frame_history.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef ANDROID_DVR_FRAME_HISTORY_H_
-#define ANDROID_DVR_FRAME_HISTORY_H_
-
-#include <dvr/graphics.h>
-#include <pdx/file_handle.h>
-#include <private/dvr/ring_buffer.h>
-
-namespace android {
-namespace dvr {
-
-// FrameHistory tracks frame times from the start of rendering commands to when
-// the buffer is ready.
-class FrameHistory {
- public:
-  FrameHistory();
-  explicit FrameHistory(int pending_frame_buffer_size);
-
-  void Reset(int pending_frame_buffer_size);
-
-  // Call when starting rendering commands (i.e. dvrBeginRenderFrame).
-  void OnFrameStart(uint32_t scheduled_vsync, int64_t scheduled_finish_ns);
-
-  // Call when rendering commands are finished (i.e. dvrPresent).
-  void OnFrameSubmit(android::pdx::LocalHandle&& fence);
-
-  // Call once per frame to see if any pending frames have finished.
-  void CheckForFinishedFrames();
-
-  // Uses the recently completed frame render times to predict how long the next
-  // frame will take, in vsync intervals. For example if the predicted frame
-  // time is 10ms and the vsync interval is 11ms, this will return 1. If the
-  // predicted frame time is 12ms and the vsync interval is 11ms, this will
-  // return 2.
-  int PredictNextFrameVsyncInterval(int64_t vsync_period_ns) const;
-
-  // Returns results for recently completed frames. Each frame's result is
-  // returned only once.
-  int GetPreviousFrameResults(DvrFrameScheduleResult* results,
-                              int result_count);
-
-  // Gets the vsync count for the most recently started frame. If there are no
-  // started frames this will return UINT32_MAX.
-  uint32_t GetCurrentFrameVsync() const;
-
- private:
-  struct PendingFrame {
-    int64_t start_ns;
-    uint32_t scheduled_vsync;
-    int64_t scheduled_finish_ns;
-    android::pdx::LocalHandle fence;
-
-    PendingFrame();
-    PendingFrame(int64_t start_ns, uint32_t scheduled_vsync,
-                 int64_t scheduled_finish_ns,
-                 android::pdx::LocalHandle&& fence);
-
-    PendingFrame(PendingFrame&&) = default;
-    PendingFrame& operator=(PendingFrame&&) = default;
-    PendingFrame(const PendingFrame&) = delete;
-    PendingFrame& operator=(const PendingFrame&) = delete;
-  };
-
-  RingBuffer<PendingFrame> pending_frames_;
-  RingBuffer<DvrFrameScheduleResult> finished_frames_;
-  RingBuffer<int64_t> frame_duration_history_;
-};
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_FRAME_HISTORY_H_
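For reference, a hedged sketch of the per-frame call sequence the removed graphics context followed with this class; the fence is assumed to come from CreateGLSyncAndFlush() and the scheduling values from the vsync client.

    #include <utility>

    #include <private/dvr/frame_history.h>

    // Sketch: per-frame FrameHistory bookkeeping. |history| persists across
    // frames.
    void OnFrameBoundary(android::dvr::FrameHistory* history,
                         uint32_t scheduled_vsync, int64_t scheduled_finish_ns,
                         android::pdx::LocalHandle fence) {
      history->CheckForFinishedFrames();  // retire frames whose fences signaled
      history->OnFrameStart(scheduled_vsync, scheduled_finish_ns);
      // ... record and submit the frame's GPU work here ...
      history->OnFrameSubmit(std::move(fence));  // frame now pends on its fence
    }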
diff --git a/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h b/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h
deleted file mode 100644
index 1d75335..0000000
--- a/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef ANDROID_DVR_GL_FENCED_FLUSH_H_
-#define ANDROID_DVR_GL_FENCED_FLUSH_H_
-
-#include <EGL/egl.h>
-#include <pdx/file_handle.h>
-
-namespace android {
-namespace dvr {
-
-// Creates an EGL_SYNC_NATIVE_FENCE_ANDROID and flushes. Returns the fence as a
-// file descriptor.
-pdx::LocalHandle CreateGLSyncAndFlush(EGLDisplay display);
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_GL_FENCED_FLUSH_H_
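A brief usage sketch; the display handle is assumed to be the EGLDisplay the frame was rendered with, and a negative descriptor is treated as "no fence available".

    #include <log/log.h>
    #include <private/dvr/gl_fenced_flush.h>

    // Sketch: flush pending GL work and obtain a release fence for the buffer
    // that was just rendered.
    android::pdx::LocalHandle GetReleaseFence(EGLDisplay display) {
      android::pdx::LocalHandle fence =
          android::dvr::CreateGLSyncAndFlush(display);
      ALOGW_IF(fence.Get() < 0, "CreateGLSyncAndFlush did not return a fence");
      return fence;
    }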
diff --git a/libs/vr/libdisplay/include/private/dvr/graphics_private.h b/libs/vr/libdisplay/include/private/dvr/graphics_private.h
deleted file mode 100644
index a08b0df..0000000
--- a/libs/vr/libdisplay/include/private/dvr/graphics_private.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef ANDROID_DVR_GRAPHICS_PRIVATE_H_
-#define ANDROID_DVR_GRAPHICS_PRIVATE_H_
-
-#ifdef __ARM_NEON
-#include <arm_neon.h>
-#else
-#ifndef __FLOAT32X4T_86
-#define __FLOAT32X4T_86
-typedef float float32x4_t __attribute__ ((__vector_size__ (16)));
-typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t;
-#endif
-#endif
-
-#include <sys/cdefs.h>
-
-#include <dvr/graphics.h>
-
-__BEGIN_DECLS
-
-#define kSurfaceBufferMaxCount 4
-#define kSurfaceViewMaxCount 4
-
-struct __attribute__((packed, aligned(16))) DisplaySurfaceMetadata {
-  // Array of orientations and translations corresponding with surface buffers.
-  // The index is associated with each allocated buffer by DisplaySurface and
-  // communicated to clients.
-  // The maximum number of buffers is hard coded here as 4 so that we can bind
-  // this data structure in GPU shaders.
-  float32x4_t orientation[kSurfaceBufferMaxCount];
-  float32x4_t translation[kSurfaceBufferMaxCount];
-};
-
-// Sets the pose used by the system for EDS. If dvrBeginRenderFrameEds() or
-// dvrBeginRenderFrameLateLatch() are called instead of dvrBeginRenderFrame()
-// it's not necessary to call this function. If this function is used, the call
-// must be made after dvrBeginRenderFrame() and before dvrPresent().
-//
-// @param[in] graphics_context The DvrGraphicsContext.
-// @param[in] render_pose_orientation Head pose orientation that rendering for
-//            this frame will be based off of. This must be an unmodified value
-//            from DvrPoseAsync, returned by dvrPoseGet.
-// @param[in] render_pose_translation Head pose translation that rendering for
-//            this frame will be based off of. This must be an unmodified value
-//            from DvrPoseAsync, returned by dvrPoseGet.
-// @return 0 on success or a negative error code on failure.
-int dvrSetEdsPose(DvrGraphicsContext* graphics_context,
-                  float32x4_t render_pose_orientation,
-                  float32x4_t render_pose_translation);
-
-__END_DECLS
-
-#endif  // ANDROID_DVR_GRAPHICS_PRIVATE_H_
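A hedged usage sketch of the ordering constraint stated above (after dvrBeginRenderFrame, before dvrPresent); the pose vectors are assumed to be unmodified values obtained from dvrPoseGet(), and the GL draw calls are left as a placeholder.

    #include <dvr/graphics.h>
    #include <private/dvr/graphics_private.h>

    // Sketch: render one frame while submitting the EDS pose explicitly, for
    // the case where plain dvrBeginRenderFrame() is used.
    int RenderOneFrame(DvrGraphicsContext* context,
                       float32x4_t pose_orientation,
                       float32x4_t pose_translation) {
      int ret = dvrBeginRenderFrame(context);
      if (ret < 0)
        return ret;
      ret = dvrSetEdsPose(context, pose_orientation, pose_translation);
      if (ret < 0)
        return ret;
      // ... issue the frame's GL draw calls here ...
      return dvrPresent(context);
    }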
diff --git a/libs/vr/libdisplay/include/private/dvr/late_latch.h b/libs/vr/libdisplay/include/private/dvr/late_latch.h
deleted file mode 100644
index c91d15c..0000000
--- a/libs/vr/libdisplay/include/private/dvr/late_latch.h
+++ /dev/null
@@ -1,190 +0,0 @@
-#ifndef ANDROID_DVR_LATE_LATCH_H_
-#define ANDROID_DVR_LATE_LATCH_H_
-
-#include <atomic>
-#include <thread>
-#include <vector>
-
-#include <dvr/dvr_display_types.h>
-#include <dvr/pose_client.h>
-#include <pdx/file_handle.h>
-#include <private/dvr/graphics/shader_program.h>
-#include <private/dvr/graphics/vr_gl_extensions.h>
-#include <private/dvr/graphics_private.h>
-#include <private/dvr/types.h>
-
-struct DvrPose;
-
-namespace android {
-namespace dvr {
-
-// Input data for late latch compute shader.
-struct LateLatchInput {
-  // For app late latch:
-  mat4 eye_from_head_mat[kSurfaceViewMaxCount];
-  mat4 proj_mat[kSurfaceViewMaxCount];
-  mat4 pose_offset[kSurfaceViewMaxCount];
-  // For EDS late latch only:
-  mat4 eds_mat1[kSurfaceViewMaxCount];
-  mat4 eds_mat2[kSurfaceViewMaxCount];
-  // For both app and EDS late latch:
-  uint32_t pose_index;
-  uint32_t render_pose_index;
-};
-
-// Output data for late latch shader. The application can use all or part of
-// this data by calling LateLatch::BindUniformBuffer.
-// This struct matches the layout of DvrGraphicsLateLatchData.
-struct LateLatchOutput {
-  mat4 view_proj_matrix[kSurfaceViewMaxCount];
-  mat4 view_matrix[kSurfaceViewMaxCount];
-  vec4 pose_quaternion;
-  vec4 pose_translation;
-};
-
-// LateLatch provides a facility for GL workloads to acquire a late-adjusted
-// model-view projection matrix, adjusted based on the position/quaternion pose
-// read from a buffer that is being written to asynchronously. The adjusted
-// MVP matrix is written to a GL buffer object via GL transform feedback.
-class LateLatch {
- public:
-  enum BufferType {
-    kViewProjMatrix,
-    kViewMatrix,
-    kPoseQuaternion,
-    kPoseTranslation,
-    // Max transform feedback count is 4, so no more buffers can go here.
-    kNumBuffers,
-  };
-
-  static size_t GetBufferSize(BufferType type) {
-    switch (type) {
-      default:
-      case kViewProjMatrix:
-      case kViewMatrix:
-        return 4 * 4 * sizeof(float);
-      case kPoseQuaternion:
-      case kPoseTranslation:
-        return 4 * sizeof(float);
-    }
-  }
-
-  static size_t GetBufferOffset(BufferType type, int view) {
-    switch (type) {
-      default:
-      case kViewProjMatrix:
-        return offsetof(LateLatchOutput, view_proj_matrix) +
-               GetBufferSize(type) * view;
-      case kViewMatrix:
-        return offsetof(LateLatchOutput, view_matrix) +
-               GetBufferSize(type) * view;
-      case kPoseQuaternion:
-        return offsetof(LateLatchOutput, pose_quaternion);
-      case kPoseTranslation:
-        return offsetof(LateLatchOutput, pose_translation);
-    }
-  }
-
-  explicit LateLatch(bool is_app_late_latch);
-  LateLatch(bool is_app_late_latch, pdx::LocalHandle&& surface_metadata_fd);
-  ~LateLatch();
-
-  // Bind the late-latch output data as a GL_UNIFORM_BUFFER. For example,
-  // to bind just the view_matrix from the output:
-  // BindUniformBuffer(BINDING, offsetof(LateLatchOutput, view_matrix),
-  //                   sizeof(mat4));
-  // buffer_index is the index of one of the output buffers if more than 1 were
-  // requested in the constructor.
-  void BindUniformBuffer(GLuint ubo_binding, size_t offset, size_t size) const {
-    glBindBufferRange(GL_UNIFORM_BUFFER, ubo_binding, output_buffer_id_, offset,
-                      size);
-  }
-
-  void BindUniformBuffer(GLuint ubo_binding, BufferType type, int view) const {
-    glBindBufferRange(GL_UNIFORM_BUFFER, ubo_binding, output_buffer_id_,
-                      GetBufferOffset(type, view), GetBufferSize(type));
-  }
-
-  GLuint output_buffer_id() const { return output_buffer_id_; }
-
-  void UnbindUniformBuffer(GLuint ubo_binding) const {
-    glBindBufferBase(GL_UNIFORM_BUFFER, ubo_binding, 0);
-  }
-
-  void CaptureOutputData(LateLatchOutput* data) const;
-
-  // Add the late latch GL commands for this frame. This should be done just
-  // before the first application draw calls that are dependent on the latest
-  // head pose.
-  //
-  // For efficiency, the application projection and eye_from_head matrices are
-  // passed through the late latch shader and output in various combinations to
-  // allow for both simple application vertex shaders that can take the view-
-  // projection matrix as-is and shaders that need to access the view matrix
-  // separately.
-  //
-  // GL state must be reset to default for this call.
-  void AddLateLatch(const LateLatchInput& data) const;
-
-  // After calling AddEdsLateLatch one or more times, this method must be called
-  // to add the necessary GL memory barrier to ensure late latch outputs are
-  // written before the EDS and warp shaders read them.
-  void PostEdsLateLatchBarrier() const {
-    // The transform feedback buffer is going to be read as a uniform by EDS,
-    // so we need a uniform memory barrier.
-    glMemoryBarrier(GL_UNIFORM_BARRIER_BIT);
-  }
-
-  // Typically not for use by application code. This method adds the EDS late
-  // latch that will adjust the application framebuffer with the latest head
-  // pose.
-  // buffer_index is the index of one of the output buffers if more than 1 were
-  // requested in the constructor.
-  void AddEdsLateLatch(const LateLatchInput& data,
-                       GLuint render_pose_buffer_object) const;
-
-  // For debugging purposes, capture the output during the next call to
-  // AddLateLatch. Set to NULL to reset.
-  void SetLateLatchDataCapture(LateLatchOutput* app_late_latch) {
-    app_late_latch_output_ = app_late_latch;
-  }
-
-  // For debugging purposes, capture the output during the next call to
-  // AddEdsLateLatch. Set to NULL to reset.
-  void SetEdsLateLatchDataCapture(LateLatchOutput* eds_late_latch) {
-    eds_late_latch_output_ = eds_late_latch;
-  }
-
- private:
-  LateLatch(const LateLatch&) = delete;
-  LateLatch& operator=(const LateLatch&) = delete;
-
-  void LoadLateLatchShader();
-
-  // Late latch shader.
-  ShaderProgram late_latch_program_;
-
-  // Async pose ring buffer object.
-  GLuint pose_buffer_object_;
-
-  GLuint metadata_buffer_id_;
-
-  // Pose matrix buffers
-  GLuint input_buffer_id_;
-  GLuint output_buffer_id_;
-
-  bool is_app_late_latch_;
-  // During development, these can be used to capture the pose output data.
-  LateLatchOutput* app_late_latch_output_;
-  LateLatchOutput* eds_late_latch_output_;
-
-  DvrPose* pose_client_;
-
-  pdx::LocalHandle surface_metadata_fd_;
-  pdx::LocalHandle pose_buffer_fd_;
-};
-
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_LATE_LATCH_H_
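For context, a hedged sketch of how an application-side late latch is driven each frame: enqueue a filled LateLatchInput with AddLateLatch(), then bind the resulting uniform block range for the eye-specific draws. The binding index and view index are placeholders chosen for illustration.

    #include <private/dvr/late_latch.h>

    // Sketch: one frame of app-side late latching for a two-view surface.
    void LateLatchFrame(android::dvr::LateLatch* late_latch,
                        const android::dvr::LateLatchInput& input) {
      // Kick the compute shader that latches the newest pose and writes the
      // per-view matrices into the output buffer object.
      late_latch->AddLateLatch(input);

      // Bind the left eye's view-projection matrix at uniform binding 0 for
      // the draw calls that follow.
      constexpr GLuint kViewProjBinding = 0;
      late_latch->BindUniformBuffer(kViewProjBinding,
                                    android::dvr::LateLatch::kViewProjMatrix,
                                    /*view=*/0);
      // ... draw left-eye geometry whose vertex shader reads the block ...
      late_latch->UnbindUniformBuffer(kViewProjBinding);
    }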
diff --git a/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h b/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h
deleted file mode 100644
index a260f17..0000000
--- a/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
-#define ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
-
-#include <semaphore.h>
-
-#include <mutex>
-#include <vector>
-
-#include <private/dvr/native_buffer.h>
-#include <private/dvr/ring_buffer.h>
-
-#include "display_client.h"
-
-namespace android {
-namespace dvr {
-namespace display {
-
-// A wrapper over dvr::ProducerQueue that caches EGLImage.
-class NativeBufferQueue {
- public:
-  NativeBufferQueue(EGLDisplay display,
-                    const std::shared_ptr<ProducerQueue>& producer_queue,
-                    uint32_t width, uint32_t height, uint32_t format,
-                    uint64_t usage, size_t capacity);
-
-  uint32_t width() const { return width_; }
-  uint32_t height() const { return height_; }
-  uint32_t format() const { return format_; }
-  uint64_t usage() const { return usage_; }
-  size_t capacity() const { return producer_queue_->capacity(); }
-
-  // Dequeue a buffer from the free queue, blocking until one is available.
-  NativeBufferProducer* Dequeue();
-
-  // A no-op here to keep the Vulkan path in GraphicsContext happy.
-  // TODO(jwcai, cort) Move Vulkan path into GVR/Google3.
-  void Enqueue(NativeBufferProducer* /*buffer*/) {}
-
- private:
-  EGLDisplay display_;
-  uint32_t width_;
-  uint32_t height_;
-  uint32_t format_;
-  uint64_t usage_;
-  std::shared_ptr<ProducerQueue> producer_queue_;
-  std::vector<sp<NativeBufferProducer>> buffers_;
-
-  NativeBufferQueue(const NativeBufferQueue&) = delete;
-  void operator=(const NativeBufferQueue&) = delete;
-};
-
-}  // namespace display
-}  // namespace dvr
-}  // namespace android
-
-#endif  // ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
diff --git a/libs/vr/libdisplay/late_latch.cpp b/libs/vr/libdisplay/late_latch.cpp
deleted file mode 100644
index e67f009..0000000
--- a/libs/vr/libdisplay/late_latch.cpp
+++ /dev/null
@@ -1,460 +0,0 @@
-#include "include/private/dvr/late_latch.h"
-
-#include <unistd.h>
-
-#include <fstream>
-#include <iostream>
-#include <string>
-
-#include <log/log.h>
-#include <private/dvr/clock_ns.h>
-#include <private/dvr/debug.h>
-#include <private/dvr/graphics/gpu_profiler.h>
-#include <private/dvr/pose_client_internal.h>
-#include <private/dvr/sensor_constants.h>
-#include <private/dvr/types.h>
-
-#define PRINT_MATRIX 0
-
-#if PRINT_MATRIX
-#ifndef LOG_TAG
-#define LOG_TAG "latelatch"
-#endif
-
-#define PE(str, ...)                                                  \
-  fprintf(stderr, "[%s:%d] " str, __FILE__, __LINE__, ##__VA_ARGS__); \
-  ALOGI("[%s:%d] " str, __FILE__, __LINE__, ##__VA_ARGS__)
-
-#define PV4(v) PE(#v "=%f,%f,%f,%f\n", v[0], v[1], v[2], v[3]);
-#define PM4(m)                                                               \
-  PE(#m ":\n %f,%f,%f,%f\n %f,%f,%f,%f\n %f,%f,%f,%f\n %f,%f,%f,%f\n",       \
-     m(0, 0), m(0, 1), m(0, 2), m(0, 3), m(1, 0), m(1, 1), m(1, 2), m(1, 3), \
-     m(2, 0), m(2, 1), m(2, 2), m(2, 3), m(3, 0), m(3, 1), m(3, 2), m(3, 3))
-#endif  // PRINT_MATRIX
-
-#define STRINGIFY2(s) #s
-#define STRINGIFY(s) STRINGIFY2(s)
-
-// Compute shader bindings.
-// GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS must be at least 8 for GLES 3.1.
-#define POSE_BINDING 0
-#define RENDER_POSE_BINDING 1
-#define INPUT_BINDING 2
-#define OUTPUT_BINDING 3
-
-using android::pdx::LocalHandle;
-
-namespace {
-
-static const std::string kShaderLateLatch = R"(  // NOLINT
-  struct Pose {
-    vec4 quat;
-    vec3 pos;
-  };
-
-  // Must match DvrPoseAsync C struct.
-  struct DvrPoseAsync {
-    vec4 orientation;
-    vec4 translation;
-    vec4 right_orientation;
-    vec4 right_translation;
-    vec4 angular_velocity;
-    vec4 velocity;
-    vec4 reserved[2];
-  };
-
-  // Must match LateLatchInputData C struct.
-  layout(binding = INPUT_BINDING, std140)
-  buffer InputData {
-    mat4 uEyeFromHeadMat[kSurfaceViewMaxCount];
-    mat4 uProjMat[kSurfaceViewMaxCount];
-    mat4 uPoseOffset[kSurfaceViewMaxCount];
-    mat4 uEdsMat1[kSurfaceViewMaxCount];
-    mat4 uEdsMat2[kSurfaceViewMaxCount];
-    uint uPoseIndex;
-    uint uRenderPoseIndex;
-  } bIn;
-
-  // std140 lays out the structure in a consistent, standard way so that we
-  // can access it from C++.
-  // This structure exactly matches the pose ring buffer in pose_client.h.
-  layout(binding = POSE_BINDING, std140)
-  buffer PoseBuffer {
-    DvrPoseAsync data[kPoseAsyncBufferTotalCount];
-  } bPose;
-
-  // Must stay in sync with DisplaySurfaceMetadata C struct.
-  // GPU thread 0 will exclusively read in a pose and capture it
-  // into this array.
-  layout(binding = RENDER_POSE_BINDING, std140)
-  buffer DisplaySurfaceMetadata {
-    vec4 orientation[kSurfaceBufferMaxCount];
-    vec4 translation[kSurfaceBufferMaxCount];
-  } bSurfaceData;
-
-  // Must stay in sync with the LateLatchOutput C struct.
-  // Each thread writes one surface view's output matrices.
-  layout(binding = OUTPUT_BINDING, std140)
-  buffer Output {
-    mat4 viewProjMatrix[kSurfaceViewMaxCount];
-    mat4 viewMatrix[kSurfaceViewMaxCount];
-    vec4 quaternion;
-    vec4 translation;
-  } bOut;
-
-  // Thread 0 will also store the single quat/pos pair in shared variables
-  // for the other threads to use (left and right eye in this array).
-  shared Pose sharedPose[2];
-
-  // Rotate v1 by the given quaternion. This is based on mathfu's
-  // Quaternion::Rotate function. It is the typical implementation of this
-  // operation. Eigen has a similar method (Quaternion::_transformVector) that
-  // supposedly requires fewer operations, but I am skeptical of optimizing
-  // shader code without proper profiling first.
-  vec3 rotate(vec4 quat, vec3 v1) {
-    float ss = 2.0 * quat.w;
-    vec3 v = quat.xyz;
-    return ss * cross(v, v1) + (ss * quat.w - 1.0) * v1 +
-           2.0 * dot(v, v1) * v;
-  }
-
-  // See Eigen Quaternion::conjugate.
-  // Note that this isn't a true multiplicative inverse unless you can guarantee
-  // quat is also normalized, but that typically isn't an issue for our
-  // purposes.
-  vec4 quatInvert(vec4 quat) {
-    return vec4(-quat.xyz, quat.w);
-  }
-
-  // This is based on mathfu's Quaternion::operator*(Quaternion)
-  // Eigen's version is mathematically equivalent, just notationally different.
-  vec4 quatMul(vec4 q1, vec4 q2) {
-    return vec4(q1.w * q2.xyz + q2.w * q1.xyz + cross(q1.xyz, q2.xyz),
-                q1.w * q2.w - dot(q1.xyz, q2.xyz));
-  }
-
-  // Equivalent to pose.h GetObjectFromReferenceMatrix.
-  mat4 getInverseMatrix(Pose pose) {
-    // Invert the quaternion and store fields the way Eigen does so we can
-    // keep in sync with Eigen's methods more easily.
-    vec4 quatInv = quatInvert(pose.quat);
-    vec3 v = quatInv.xyz;
-    float s = quatInv.w;
-    // Convert quaternion to matrix. See Eigen Quaternion::toRotationMatrix()
-    float x2 = v.x * v.x, y2 = v.y * v.y, z2 = v.z * v.z;
-    float sx = s * v.x, sy = s * v.y, sz = s * v.z;
-    float xz = v.x * v.z, yz = v.y * v.z, xy = v.x * v.y;
-    // Inverse translation.
-    vec3 point = -pose.pos;
-
-    return
-      mat4(1.0 - 2.0 * (y2 + z2), 2.0 * (xy + sz), 2.0 * (xz - sy), 0.0,
-           2.0 * (xy - sz), 1.0 - 2.0 * (x2 + z2), 2.0 * (sx + yz), 0.0,
-           2.0 * (sy + xz), 2.0 * (yz - sx), 1.0 - 2.0 * (x2 + y2), 0.0,
-           0.0, 0.0, 0.0, 1.0)*
-      mat4(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
-           point.x, point.y, point.z, 1.0);
-  }
-
-  void appLateLatch() {
-    uint poseIndex = (gl_LocalInvocationIndex & uint(1));
-    mat4 head_from_center = getInverseMatrix(sharedPose[poseIndex]);
-    bOut.viewMatrix[gl_LocalInvocationIndex] =
-        bIn.uEyeFromHeadMat[gl_LocalInvocationIndex] *
-        head_from_center * bIn.uPoseOffset[gl_LocalInvocationIndex];
-    bOut.viewProjMatrix[gl_LocalInvocationIndex] =
-        bIn.uProjMat[gl_LocalInvocationIndex] *
-        bOut.viewMatrix[gl_LocalInvocationIndex];
-  }
-
-  // Extract the app frame's pose.
-  Pose getPoseFromApp() {
-    Pose p;
-    p.quat = bSurfaceData.orientation[bIn.uRenderPoseIndex];
-    p.pos =  bSurfaceData.translation[bIn.uRenderPoseIndex].xyz;
-    return p;
-  }
-
-  // See Posef::GetPoseOffset.
-  Pose getPoseOffset(Pose p1, Pose p2) {
-    Pose p;
-    p.quat = quatMul(quatInvert(p2.quat), p1.quat);
-    // TODO(jbates) Consider enabling positional EDS when it is better
-    //              tested.
-    // p.pos = p2.pos - p1.pos;
-    p.pos = vec3(0.0);
-    return p;
-  }
-
-  void edsLateLatch() {
-    Pose pose1 = getPoseFromApp();
-    Pose correction;
-    // Ignore the texture pose if the quat is not unit-length.
-    float tex_quat_length = length(pose1.quat);
-    uint poseIndex = (gl_LocalInvocationIndex & uint(1));
-    if (abs(tex_quat_length - 1.0) < 0.001)
-      correction = getPoseOffset(pose1, sharedPose[poseIndex]);
-    else
-      correction = Pose(vec4(0, 0, 0, 1), vec3(0, 0, 0));
-    mat4 eye_old_from_eye_new_matrix = getInverseMatrix(correction);
-    bOut.viewProjMatrix[gl_LocalInvocationIndex] =
-        bIn.uEdsMat1[gl_LocalInvocationIndex] *
-        eye_old_from_eye_new_matrix * bIn.uEdsMat2[gl_LocalInvocationIndex];
-    // Currently unused, except for debugging:
-    bOut.viewMatrix[gl_LocalInvocationIndex] = eye_old_from_eye_new_matrix;
-  }
-
-  // One thread per surface view.
-  layout (local_size_x = kSurfaceViewMaxCount, local_size_y = 1,
-          local_size_z = 1) in;
-
-  void main() {
-    // First, thread 0 late latches pose and stores it into various places.
-    if (gl_LocalInvocationIndex == uint(0)) {
-      sharedPose[0].quat = bPose.data[bIn.uPoseIndex].orientation;
-      sharedPose[0].pos =  bPose.data[bIn.uPoseIndex].translation.xyz;
-      sharedPose[1].quat = bPose.data[bIn.uPoseIndex].right_orientation;
-      sharedPose[1].pos =  bPose.data[bIn.uPoseIndex].right_translation.xyz;
-      if (IS_APP_LATE_LATCH) {
-        bSurfaceData.orientation[bIn.uRenderPoseIndex] = sharedPose[0].quat;
-        bSurfaceData.translation[bIn.uRenderPoseIndex] = vec4(sharedPose[0].pos, 0.0);
-        // TODO(jbates) implement app late-latch support for separate eye poses.
-        // App late latch currently uses the same pose for both eye views.
-        sharedPose[1] = sharedPose[0];
-      }
-      bOut.quaternion = sharedPose[0].quat;
-      bOut.translation = vec4(sharedPose[0].pos, 0.0);
-    }
-
-    // Memory barrier to make sure all threads can see prior writes.
-    memoryBarrierShared();
-
-    // Execution barrier to block all threads here until all threads have
-    // reached this point -- ensures the late latching is done.
-    barrier();
-
-    if (IS_APP_LATE_LATCH)
-      appLateLatch();
-    else
-      edsLateLatch();
-  }
-)";
-
-}  // anonymous namespace
-
-namespace android {
-namespace dvr {
-
-LateLatch::LateLatch(bool is_app_late_latch)
-    : LateLatch(is_app_late_latch, LocalHandle()) {}
-
-LateLatch::LateLatch(bool is_app_late_latch,
-                     LocalHandle&& surface_metadata_fd)
-    : is_app_late_latch_(is_app_late_latch),
-      app_late_latch_output_(NULL),
-      eds_late_latch_output_(NULL),
-      surface_metadata_fd_(std::move(surface_metadata_fd)) {
-  CHECK_GL();
-  glGenBuffers(1, &input_buffer_id_);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
-  glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(LateLatchInput), nullptr,
-               GL_DYNAMIC_DRAW);
-  glGenBuffers(1, &output_buffer_id_);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer_id_);
-  glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(LateLatchOutput), nullptr,
-               GL_DYNAMIC_COPY);
-  CHECK_GL();
-
-  pose_client_ = dvrPoseCreate();
-  if (!pose_client_) {
-    ALOGE("LateLatch Error: failed to create pose client");
-  } else {
-    int ret = privateDvrPoseGetRingBufferFd(pose_client_, &pose_buffer_fd_);
-    if (ret < 0) {
-      ALOGE("LateLatch Error: failed to get pose ring buffer");
-    }
-  }
-
-  glGenBuffers(1, &pose_buffer_object_);
-  glGenBuffers(1, &metadata_buffer_id_);
-  if (!glBindSharedBufferQCOM) {
-    ALOGE("Error: Missing gralloc buffer extension, no pose data");
-  } else {
-    if (pose_buffer_fd_) {
-      glBindBuffer(GL_SHADER_STORAGE_BUFFER, pose_buffer_object_);
-      glBindSharedBufferQCOM(GL_SHADER_STORAGE_BUFFER,
-                             kPoseAsyncBufferTotalCount * sizeof(DvrPoseAsync),
-                             pose_buffer_fd_.Get());
-    }
-    CHECK_GL();
-  }
-
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, metadata_buffer_id_);
-  if (surface_metadata_fd_ && glBindSharedBufferQCOM) {
-    glBindSharedBufferQCOM(GL_SHADER_STORAGE_BUFFER,
-                           sizeof(DisplaySurfaceMetadata),
-                           surface_metadata_fd_.Get());
-  } else {
-    // Fall back on the internal metadata buffer when none is provided, for
-    // example when distortion is done in the application process.
-    glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(DisplaySurfaceMetadata),
-                 nullptr, GL_DYNAMIC_COPY);
-  }
-  CHECK_GL();
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
-
-  CHECK_GL();
-  LoadLateLatchShader();
-}
-
-LateLatch::~LateLatch() {
-  glDeleteBuffers(1, &metadata_buffer_id_);
-  glDeleteBuffers(1, &input_buffer_id_);
-  glDeleteBuffers(1, &output_buffer_id_);
-  glDeleteBuffers(1, &pose_buffer_object_);
-  dvrPoseDestroy(pose_client_);
-}
-
-void LateLatch::LoadLateLatchShader() {
-  std::string str;
-  str += "\n#define POSE_BINDING " STRINGIFY(POSE_BINDING);
-  str += "\n#define RENDER_POSE_BINDING " STRINGIFY(RENDER_POSE_BINDING);
-  str += "\n#define INPUT_BINDING " STRINGIFY(INPUT_BINDING);
-  str += "\n#define OUTPUT_BINDING " STRINGIFY(OUTPUT_BINDING);
-  str += "\n#define kPoseAsyncBufferTotalCount " STRINGIFY(
-      kPoseAsyncBufferTotalCount);
-  str += "\n#define kSurfaceBufferMaxCount " STRINGIFY(kSurfaceBufferMaxCount);
-  str += "\n#define kSurfaceBufferMaxCount " STRINGIFY(kSurfaceBufferMaxCount);
-  str += "\n#define kSurfaceViewMaxCount " STRINGIFY(kSurfaceViewMaxCount);
-  str += "\n#define IS_APP_LATE_LATCH ";
-  str += is_app_late_latch_ ? "true" : "false";
-  str += "\n";
-  str += kShaderLateLatch;
-  late_latch_program_.Link(str);
-  CHECK_GL();
-}
-
-void LateLatch::CaptureOutputData(LateLatchOutput* data) const {
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer_id_);
-  LateLatchOutput* out_data = static_cast<LateLatchOutput*>(glMapBufferRange(
-      GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchOutput), GL_MAP_READ_BIT));
-  *data = *out_data;
-  glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
-  CHECK_GL();
-}
-
-void LateLatch::AddLateLatch(const LateLatchInput& data) const {
-  LOG_ALWAYS_FATAL_IF(!is_app_late_latch_);
-  CHECK_GL();
-  late_latch_program_.Use();
-
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING,
-                   metadata_buffer_id_);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, pose_buffer_object_);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, output_buffer_id_);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
-  LateLatchInput* adata = (LateLatchInput*)glMapBufferRange(
-      GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchInput),
-      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
-  if (adata)
-    *adata = data;
-  else
-    ALOGE("Error: LateLatchInput gl mapping is null");
-  glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, input_buffer_id_);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
-  CHECK_GL();
-
-  // The output buffer is going to be written but it may be read by
-  // earlier shaders, so we need a shader storage memory barrier.
-  glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
-
-  glDispatchCompute(1, 1, 1);
-  CHECK_GL();
-
-  // The output buffer is going to be read as a uniform by the app,
-  // so we need a uniform memory barrier.
-  glMemoryBarrier(GL_UNIFORM_BARRIER_BIT);
-
-  if (app_late_latch_output_) {
-    // Capture the output data:
-    CaptureOutputData(app_late_latch_output_);
-  }
-#if PRINT_MATRIX
-  // Print the composed matrix to stderr:
-  LateLatchOutput out_data;
-  CaptureOutputData(&out_data);
-  CHECK_GL();
-  PE("LL APP slot:%d\n", data.render_pose_index);
-  PM4(data.proj_mat[0]);
-  PM4(out_data.view_proj_matrix[0]);
-  PM4(out_data.view_proj_matrix[1]);
-  PM4(out_data.view_proj_matrix[2]);
-  PM4(out_data.view_proj_matrix[3]);
-  PM4(out_data.view_matrix[0]);
-  PM4(out_data.view_matrix[1]);
-  PM4(out_data.view_matrix[2]);
-  PM4(out_data.view_matrix[3]);
-  PV4(out_data.pose_quaternion);
-  PV4(out_data.pose_translation);
-#endif
-
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, 0);
-  glUseProgram(0);
-}
-
-void LateLatch::AddEdsLateLatch(const LateLatchInput& data,
-                                GLuint render_pose_buffer_object) const {
-  LOG_ALWAYS_FATAL_IF(is_app_late_latch_);
-  late_latch_program_.Use();
-
-  // Fall back on internal buffer when none is provided.
-  if (!render_pose_buffer_object)
-    render_pose_buffer_object = metadata_buffer_id_;
-
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING,
-                   render_pose_buffer_object);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, pose_buffer_object_);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, output_buffer_id_);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
-  LateLatchInput* adata = (LateLatchInput*)glMapBufferRange(
-      GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchInput),
-      GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
-  *adata = data;
-  glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
-  glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, input_buffer_id_);
-  CHECK_GL();
-
-  glDispatchCompute(1, 1, 1);
-  CHECK_GL();
-
-  if (eds_late_latch_output_) {
-    // Capture the output data:
-    CaptureOutputData(eds_late_latch_output_);
-  }
-#if PRINT_MATRIX
-  // Print the composed matrix to stderr:
-  LateLatchOutput out_data;
-  CaptureOutputData(&out_data);
-  CHECK_GL();
-  PE("LL EDS\n");
-  PM4(out_data.view_proj_matrix[0]);
-  PM4(out_data.view_matrix[0]);
-  PV4(out_data.pose_quaternion);
-  PV4(out_data.pose_translation);
-#endif
-
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, 0);
-  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, 0);
-  glUseProgram(0);
-}
-
-}  // namespace dvr
-}  // namespace android
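For anyone auditing the shader math deleted above, the following is a small, self-contained C++ sketch (an editorial illustration, not part of this change or of any libdisplay API) that transcribes the GLSL quatInvert()/getInverseMatrix() logic using column-major 4x4 matrices in GLSL's mat4 constructor order; Pose and Mat4 are ad hoc names introduced only for this sketch.

    // Editorial sketch: CPU transcription of the deleted shader's quatInvert()
    // and getInverseMatrix(), handy for cross-checking the GLSL. Pose and Mat4
    // are ad-hoc types, not libdisplay types.
    #include <array>

    struct Pose {
      std::array<float, 4> quat;  // x, y, z, w -- matches the shader's vec4.
      std::array<float, 3> pos;
    };

    // Column-major 4x4, element [c * 4 + r], matching GLSL mat4 layout.
    using Mat4 = std::array<float, 16>;

    static Mat4 Multiply(const Mat4& a, const Mat4& b) {
      Mat4 out{};
      for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
          for (int k = 0; k < 4; ++k)
            out[c * 4 + r] += a[k * 4 + r] * b[c * 4 + k];
      return out;
    }

    // Equivalent of the shader's getInverseMatrix(): conjugate the quaternion,
    // expand it to a rotation matrix, then compose with the negated translation.
    static Mat4 GetInverseMatrix(const Pose& pose) {
      const float x = -pose.quat[0], y = -pose.quat[1], z = -pose.quat[2];
      const float s = pose.quat[3];
      const float x2 = x * x, y2 = y * y, z2 = z * z;
      const float sx = s * x, sy = s * y, sz = s * z;
      const float xz = x * z, yz = y * z, xy = x * y;
      const Mat4 rotation = {
          1 - 2 * (y2 + z2), 2 * (xy + sz),     2 * (xz - sy),     0,
          2 * (xy - sz),     1 - 2 * (x2 + z2), 2 * (sx + yz),     0,
          2 * (sy + xz),     2 * (yz - sx),     1 - 2 * (x2 + y2), 0,
          0,                 0,                 0,                 1};
      const Mat4 translation = {1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0,
                                -pose.pos[0], -pose.pos[1], -pose.pos[2], 1};
      return Multiply(rotation, translation);
    }

As a quick check, the identity pose (quat = (0, 0, 0, 1), pos = (0, 0, 0)) maps to the identity matrix, and a pure translation comes back negated in the last column, as expected for an inverse transform.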
diff --git a/libs/vr/libdisplay/native_buffer_queue.cpp b/libs/vr/libdisplay/native_buffer_queue.cpp
deleted file mode 100644
index 1bb05d8..0000000
--- a/libs/vr/libdisplay/native_buffer_queue.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-#include "include/private/dvr/native_buffer_queue.h"
-
-#include <log/log.h>
-#include <sys/epoll.h>
-#define ATRACE_TAG ATRACE_TAG_GRAPHICS
-#include <utils/Trace.h>
-
-#include <array>
-
-#include <dvr/dvr_display_types.h>
-
-namespace android {
-namespace dvr {
-namespace display {
-
-NativeBufferQueue::NativeBufferQueue(
-    EGLDisplay display, const std::shared_ptr<ProducerQueue>& producer_queue,
-    uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
-    size_t capacity)
-    : display_(display),
-      width_(width),
-      height_(height),
-      format_(format),
-      usage_(usage),
-      producer_queue_(producer_queue),
-      buffers_(capacity) {
-  for (size_t i = 0; i < capacity; i++) {
-    size_t slot;
-    // TODO(jwcai) Should change to use BufferViewPort's spec for configuration.
-    const int ret = producer_queue_->AllocateBuffer(width_, height_, 1, format_,
-                                                    usage_, &slot);
-    if (ret < 0) {
-      ALOGE(
-          "NativeBufferQueue::NativeBufferQueue: Failed to allocate buffer: %s",
-          strerror(-ret));
-      return;
-    }
-
-    ALOGD_IF(TRACE, "NativeBufferQueue::NativeBufferQueue: slot=%zu", slot);
-  }
-}
-
-NativeBufferProducer* NativeBufferQueue::Dequeue() {
-  ATRACE_NAME("NativeBufferQueue::Dequeue");
-  size_t slot;
-  pdx::LocalHandle fence;
-  auto buffer_status = producer_queue_->Dequeue(-1, &slot, &fence);
-  if (!buffer_status) {
-    ALOGE("NativeBufferQueue::Dequeue: Failed to dequeue buffer: %s",
-          buffer_status.GetErrorMessage().c_str());
-    return nullptr;
-  }
-
-  if (buffers_[slot] == nullptr)
-    buffers_[slot] =
-        new NativeBufferProducer(buffer_status.take(), display_, slot);
-
-  ALOGD_IF(TRACE, "NativeBufferQueue::Dequeue: slot=%zu buffer=%p", slot,
-           buffers_[slot].get());
-  return buffers_[slot].get();
-}
-
-}  // namespace display
-}  // namespace dvr
-}  // namespace android
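The file removed above was essentially a thin caching layer: it blocked on ProducerQueue::Dequeue() for a slot index and lazily created one NativeBufferProducer wrapper per slot, so repeated dequeues of the same slot reused the same object. The self-contained sketch below is an editorial illustration of that per-slot caching pattern only; Wrapper, SlotCachingQueue, and the dequeue callback are stand-ins, not the real BufferHub API.

    // Editorial sketch: the lazy per-slot wrapper caching pattern used by the
    // deleted NativeBufferQueue. Wrapper and the dequeue callback are
    // stand-ins, not the real BufferHub API.
    #include <cstdio>
    #include <functional>
    #include <memory>
    #include <vector>

    struct Wrapper {
      explicit Wrapper(size_t slot) : slot(slot) {}
      size_t slot;
    };

    class SlotCachingQueue {
     public:
      // |dequeue_slot| blocks until a slot is available; it returns the slot
      // index, or a negative value on error.
      SlotCachingQueue(size_t capacity, std::function<int()> dequeue_slot)
          : wrappers_(capacity), dequeue_slot_(std::move(dequeue_slot)) {}

      // Mirrors the shape of NativeBufferQueue::Dequeue(): fetch a slot, build
      // the wrapper for that slot on first use, and return the cached one after.
      Wrapper* Dequeue() {
        const int slot = dequeue_slot_();
        if (slot < 0 || static_cast<size_t>(slot) >= wrappers_.size())
          return nullptr;  // The deleted code logged and bailed on a failed dequeue.
        if (!wrappers_[slot])
          wrappers_[slot] = std::make_unique<Wrapper>(slot);
        return wrappers_[slot].get();
      }

     private:
      std::vector<std::unique_ptr<Wrapper>> wrappers_;
      std::function<int()> dequeue_slot_;
    };

    int main() {
      size_t next = 0;
      SlotCachingQueue queue(2, [&next] { return static_cast<int>(next++ % 2); });
      std::printf("slot=%zu\n", queue.Dequeue()->slot);  // slot=0
      std::printf("slot=%zu\n", queue.Dequeue()->slot);  // slot=1
      std::printf("slot=%zu\n", queue.Dequeue()->slot);  // slot=0 again, cached.
      return 0;
    }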
diff --git a/libs/vr/libdisplay/tests/dummy_native_window_tests.cpp b/libs/vr/libdisplay/tests/dummy_native_window_tests.cpp
deleted file mode 100644
index 5f3ff53..0000000
--- a/libs/vr/libdisplay/tests/dummy_native_window_tests.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <private/dvr/dummy_native_window.h>
-#include <gtest/gtest.h>
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-#include <GLES/gl.h>
-#include <GLES/glext.h>
-#include <GLES2/gl2.h>
-
-class DummyNativeWindowTests : public ::testing::Test {
- public:
-  EGLDisplay display_;
-  bool initialized_;
-
-  DummyNativeWindowTests()
-      : display_(nullptr)
-      , initialized_(false)
-  {
-  }
-
-  virtual void SetUp() {
-    display_ = eglGetDisplay(EGL_DEFAULT_DISPLAY);
-
-    ASSERT_NE(nullptr, display_);
-    initialized_ = eglInitialize(display_, nullptr, nullptr);
-
-    ASSERT_TRUE(initialized_);
-  }
-
-  virtual void TearDown() {
-    if (display_ && initialized_) {
-      eglTerminate(display_);
-    }
-  }
-};
-
-// Test that eglCreateWindowSurface works with DummyNativeWindow
-TEST_F(DummyNativeWindowTests, TryCreateEglWindow) {
-  EGLint attribs[] = {
-      EGL_NONE,
-  };
-
-  EGLint num_configs;
-  EGLConfig config;
-  ASSERT_TRUE(eglChooseConfig(display_, attribs, &config, 1, &num_configs));
-
-  std::unique_ptr<android::dvr::DummyNativeWindow> dummy_window(
-      new android::dvr::DummyNativeWindow());
-
-  EGLint context_attribs[] = {
-    EGL_NONE,
-  };
-
-  EGLSurface surface = eglCreateWindowSurface(display_, config,
-                                              dummy_window.get(),
-                                              context_attribs);
-
-  EXPECT_NE(nullptr, surface);
-
-  bool destroyed = eglDestroySurface(display_, surface);
-
-  EXPECT_TRUE(destroyed);
-}
-
diff --git a/libs/vr/libdisplay/tests/graphics_app_tests.cpp b/libs/vr/libdisplay/tests/graphics_app_tests.cpp
deleted file mode 100644
index c592ba9..0000000
--- a/libs/vr/libdisplay/tests/graphics_app_tests.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <dvr/graphics.h>
-#include <gtest/gtest.h>
-
-TEST(GraphicsAppTests, GetNativeDisplayDimensions) {
-  int width, height;
-  dvrGetNativeDisplayDimensions(&width, &height);
-  EXPECT_GT(width, 0);
-  EXPECT_GT(height, 0);
-}
-
-// TODO(jpoichet) How to check it worked?
-TEST(GraphicsAppTests, GraphicsSurfaceSetVisible) {
-  DvrSurfaceParameter surface_params[] = {DVR_SURFACE_PARAMETER_LIST_END};
-  DvrGraphicsContext* context = nullptr;
-  int result = dvrGraphicsContextCreate(surface_params, &context);
-  ASSERT_GE(result, 0);
-  ASSERT_NE(context, nullptr);
-  dvrGraphicsSurfaceSetVisible(context, 0);
-  dvrGraphicsSurfaceSetVisible(context, 1);
-  dvrGraphicsSurfaceSetVisible(context, 2);
-}
-
-// TODO(jpoichet) How to check it worked?
-TEST(GraphicsAppTests, GraphicsSurfaceSetZOrder) {
-  DvrSurfaceParameter surface_params[] = {DVR_SURFACE_PARAMETER_LIST_END};
-  DvrGraphicsContext* context = nullptr;
-  int result = dvrGraphicsContextCreate(surface_params, &context);
-  ASSERT_GE(result, 0);
-  ASSERT_NE(context, nullptr);
-  dvrGraphicsSurfaceSetZOrder(context, -1);
-  dvrGraphicsSurfaceSetZOrder(context, 0);
-  dvrGraphicsSurfaceSetZOrder(context, 1);
-  dvrGraphicsSurfaceSetZOrder(context, 2);
-}
-
-TEST(GraphicsAppTests, GraphicsContext) {
-  DvrGraphicsContext* context = 0;
-  int display_width = 0, display_height = 0;
-  int surface_width = 0, surface_height = 0;
-  float inter_lens_meters = 0.0f;
-  float left_fov[4] = {0.0f};
-  float right_fov[4] = {0.0f};
-  uint64_t vsync_period = 0;
-  int disable_warp = 0;
-  DvrSurfaceParameter surface_params[] = {
-      DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
-      DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
-      DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
-      DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
-      DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
-      DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
-      DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
-      DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
-      DVR_SURFACE_PARAMETER_OUT(VSYNC_PERIOD, &vsync_period),
-      DVR_SURFACE_PARAMETER_LIST_END,
-  };
-  dvrGraphicsContextCreate(surface_params, &context);
-  EXPECT_NE(nullptr, context);
-
-  dvrGraphicsSurfaceSetVisible(context, 1);
-
-  DvrFrameSchedule schedule;
-  int wait_result = dvrGraphicsWaitNextFrame(context, 0, &schedule);
-  EXPECT_EQ(wait_result, 0);
-  EXPECT_GE(schedule.vsync_count, 0u);
-
-  dvrBeginRenderFrame(context);
-
-  // Check range of vsync period from 60fps to 100fps.
-  // TODO(jbates) Once we have stable hardware, clamp this range down further.
-  EXPECT_LE(vsync_period, 1000000000ul / 60ul);
-  EXPECT_GE(vsync_period, 1000000000ul / 100ul);
-
-  dvrPresent(context);
-  dvrGraphicsContextDestroy(context);
-}
-
-TEST(GraphicsAppTests, CustomSurfaceSize) {
-  DvrGraphicsContext* context = 0;
-  int display_width = 0, display_height = 0;
-  int surface_width = 0, surface_height = 0;
-  float inter_lens_meters = 0.0f;
-  float left_fov[4] = {0.0f};
-  float right_fov[4] = {0.0f};
-  int disable_warp = 0;
-  int req_width = 256, req_height = 128;
-  DvrSurfaceParameter surface_params[] = {
-      DVR_SURFACE_PARAMETER_IN(WIDTH, req_width),
-      DVR_SURFACE_PARAMETER_IN(HEIGHT, req_height),
-      DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
-      DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
-      DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
-      DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
-      DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
-      DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
-      DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
-      DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
-      DVR_SURFACE_PARAMETER_LIST_END,
-  };
-  dvrGraphicsContextCreate(surface_params, &context);
-  EXPECT_NE(nullptr, context);
-
-  EXPECT_EQ(req_width, surface_width);
-  EXPECT_EQ(req_height, surface_height);
-  dvrGraphicsContextDestroy(context);
-}
-