Add DaydreamVR native libraries and services
Upstreaming the main VR system components from master-dreamos-dev
into goog/master.
Bug: None
Test: `m -j32` succeeds. Sailfish boots and basic_vr sample app works
Change-Id: I853015872afc443aecee10411ef2d6b79184d051
diff --git a/libs/vr/libdisplay/Android.mk b/libs/vr/libdisplay/Android.mk
new file mode 100644
index 0000000..670bdcd
--- /dev/null
+++ b/libs/vr/libdisplay/Android.mk
@@ -0,0 +1,96 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+ native_window.cpp \
+ native_buffer_queue.cpp \
+ display_client.cpp \
+ display_manager_client.cpp \
+ display_manager_client_impl.cpp \
+ display_rpc.cpp \
+ dummy_native_window.cpp \
+ gl_fenced_flush.cpp \
+ graphics.cpp \
+ late_latch.cpp \
+ video_mesh_surface_client.cpp \
+ vsync_client.cpp \
+ vsync_client_api.cpp \
+ screenshot_client.cpp \
+ frame_history.cpp
+
+includeFiles := \
+ $(LOCAL_PATH)/include \
+ frameworks/native/vulkan/include
+
+sharedLibraries := \
+ libbase \
+ libcutils \
+ liblog \
+ libutils \
+ libEGL \
+ libGLESv2 \
+ libvulkan \
+ libui \
+ libgui \
+ libhardware \
+ libsync
+
+staticLibraries := \
+ libchrome \
+ libbufferhub \
+ libbufferhubqueue \
+ libdvrcommon \
+ libdvrgraphics \
+ libsensor \
+ libpdx_default_transport \
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_SRC_FILES := $(sourceFiles)
+LOCAL_C_INCLUDES := $(includeFiles)
+#LOCAL_CPPFLAGS := -UNDEBUG -DDEBUG -O0 -g
+LOCAL_CFLAGS += -DLOG_TAG=\"libdisplay\"
+LOCAL_CFLAGS += -DTRACE=0
+LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_GRAPHICS
+LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES -DEGL_EGLEXT_PROTOTYPES
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(includeFiles)
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_STATIC_LIBRARIES := $(staticLibraries)
+LOCAL_MODULE := libdisplay
+include $(BUILD_STATIC_LIBRARY)
+
+
+testFiles := \
+ tests/graphics_app_tests.cpp
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := graphics_app_tests
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := \
+ $(testFiles) \
+
+LOCAL_C_INCLUDES := \
+ $(includeFiles) \
+
+LOCAL_SHARED_LIBRARIES := \
+ $(sharedLibraries) \
+
+LOCAL_STATIC_LIBRARIES := \
+ libdisplay \
+ $(staticLibraries) \
+
+include $(BUILD_NATIVE_TEST)
diff --git a/libs/vr/libdisplay/display_client.cpp b/libs/vr/libdisplay/display_client.cpp
new file mode 100644
index 0000000..cfb346d
--- /dev/null
+++ b/libs/vr/libdisplay/display_client.cpp
@@ -0,0 +1,279 @@
+#include "include/private/dvr/display_client.h"
+
+#include <cutils/log.h>
+#include <cutils/native_handle.h>
+#include <pdx/default_transport/client_channel.h>
+#include <pdx/default_transport/client_channel_factory.h>
+#include <pdx/status.h>
+
+#include <mutex>
+
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/late_latch.h>
+#include <private/dvr/native_buffer.h>
+
+using android::pdx::LocalHandle;
+using android::pdx::LocalChannelHandle;
+using android::pdx::Status;
+using android::pdx::Transaction;
+using android::pdx::rpc::IfAnyOf;
+
+namespace android {
+namespace dvr {
+
+SurfaceClient::SurfaceClient(LocalChannelHandle channel_handle,
+ SurfaceType type)
+ : Client{pdx::default_transport::ClientChannel::Create(
+ std::move(channel_handle))},
+ type_(type) {}
+
+SurfaceClient::SurfaceClient(const std::string& endpoint_path, SurfaceType type)
+ : Client{pdx::default_transport::ClientChannelFactory::Create(
+ endpoint_path),
+ kInfiniteTimeout},
+ type_(type) {}
+
+int SurfaceClient::GetMetadataBufferFd(LocalHandle* out_fd) {
+ auto buffer_producer = GetMetadataBuffer();
+ if (!buffer_producer)
+ return -ENOMEM;
+
+ *out_fd = buffer_producer->GetBlobFd();
+ return 0;
+}
+
+std::shared_ptr<BufferProducer> SurfaceClient::GetMetadataBuffer() {
+ if (!metadata_buffer_) {
+ auto status = InvokeRemoteMethod<DisplayRPC::GetMetadataBuffer>();
+ if (!status) {
+ ALOGE(
+ "SurfaceClient::AllocateMetadataBuffer: Failed to allocate buffer: "
+ "%s",
+ status.GetErrorMessage().c_str());
+ return nullptr;
+ }
+
+ metadata_buffer_ = BufferProducer::Import(status.take());
+ }
+
+ return metadata_buffer_;
+}
+
+DisplaySurfaceClient::DisplaySurfaceClient(int width, int height, int format,
+ int usage, int flags)
+ : BASE(DisplayRPC::kClientPath, SurfaceTypeEnum::Normal),
+ width_(width),
+ height_(height),
+ format_(format),
+ usage_(usage),
+ flags_(flags),
+ z_order_(0),
+ visible_(true),
+ exclude_from_blur_(false),
+ blur_behind_(true),
+ mapped_metadata_buffer_(nullptr) {
+ auto status = InvokeRemoteMethod<DisplayRPC::CreateSurface>(
+ width, height, format, usage, flags);
+ if (!status) {
+ ALOGE(
+ "DisplaySurfaceClient::DisplaySurfaceClient: Failed to create display "
+ "surface: %s",
+ status.GetErrorMessage().c_str());
+ Close(status.error());
+ }
+}
+
+void DisplaySurfaceClient::SetVisible(bool visible) {
+ SetAttributes({{DisplaySurfaceAttributeEnum::Visible,
+ DisplaySurfaceAttributeValue{visible}}});
+}
+
+void DisplaySurfaceClient::SetZOrder(int z_order) {
+ SetAttributes({{DisplaySurfaceAttributeEnum::ZOrder,
+ DisplaySurfaceAttributeValue{z_order}}});
+}
+
+void DisplaySurfaceClient::SetExcludeFromBlur(bool exclude_from_blur) {
+ SetAttributes({{DisplaySurfaceAttributeEnum::ExcludeFromBlur,
+ DisplaySurfaceAttributeValue{exclude_from_blur}}});
+}
+
+void DisplaySurfaceClient::SetBlurBehind(bool blur_behind) {
+ SetAttributes({{DisplaySurfaceAttributeEnum::BlurBehind,
+ DisplaySurfaceAttributeValue{blur_behind}}});
+}
+
+void DisplaySurfaceClient::SetAttributes(
+ const DisplaySurfaceAttributes& attributes) {
+ Status<int> status =
+ InvokeRemoteMethod<DisplayRPC::SetAttributes>(attributes);
+ if (!status) {
+ ALOGE(
+ "DisplaySurfaceClient::SetAttributes: Failed to set display surface "
+ "attributes: %s",
+ status.GetErrorMessage().c_str());
+ return;
+ }
+
+ // Set the local cached copies of the attributes we care about from the full
+ // set of attributes sent to the display service.
+ for (const auto& attribute : attributes) {
+ const auto& key = attribute.first;
+ const auto* variant = &attribute.second;
+ bool invalid_value = false;
+ switch (key) {
+ case DisplaySurfaceAttributeEnum::Visible:
+ invalid_value =
+ !IfAnyOf<int32_t, int64_t, bool>::Get(variant, &visible_);
+ break;
+ case DisplaySurfaceAttributeEnum::ZOrder:
+ invalid_value = !IfAnyOf<int32_t>::Get(variant, &z_order_);
+ break;
+ case DisplaySurfaceAttributeEnum::ExcludeFromBlur:
+ invalid_value =
+ !IfAnyOf<int32_t, int64_t, bool>::Get(variant, &exclude_from_blur_);
+ break;
+ case DisplaySurfaceAttributeEnum::BlurBehind:
+ invalid_value =
+ !IfAnyOf<int32_t, int64_t, bool>::Get(variant, &blur_behind_);
+ break;
+ }
+
+ if (invalid_value) {
+ ALOGW(
+ "DisplaySurfaceClient::SetAttributes: Failed to set display "
+ "surface attribute '%s' because of incompatible type: %d",
+ DisplaySurfaceAttributeEnum::ToString(key).c_str(), variant->index());
+ }
+ }
+}
+
+std::shared_ptr<BufferProducer> DisplaySurfaceClient::AllocateBuffer(
+ uint32_t* buffer_index) {
+ auto status = InvokeRemoteMethod<DisplayRPC::AllocateBuffer>();
+ if (!status) {
+ ALOGE("DisplaySurfaceClient::AllocateBuffer: Failed to allocate buffer: %s",
+ status.GetErrorMessage().c_str());
+ return nullptr;
+ }
+
+ if (buffer_index)
+ *buffer_index = status.get().first;
+ return BufferProducer::Import(status.take().second);
+}
+
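+// Maps the metadata blob into this process on first use and caches the
+// pointer. The region is shared with the display service, hence volatile.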
+volatile DisplaySurfaceMetadata* DisplaySurfaceClient::GetMetadataBufferPtr() {
+ if (!mapped_metadata_buffer_) {
+ if (auto buffer_producer = GetMetadataBuffer()) {
+ void* addr = nullptr;
+ const int ret = buffer_producer->GetBlobReadWritePointer(
+ sizeof(DisplaySurfaceMetadata), &addr);
+ if (ret < 0) {
+ ALOGE(
+ "DisplaySurfaceClient::GetMetadataBufferPtr: Failed to map surface "
+ "metadata: %s",
+ strerror(-ret));
+ return nullptr;
+ }
+ mapped_metadata_buffer_ = static_cast<DisplaySurfaceMetadata*>(addr);
+ }
+ }
+
+ return mapped_metadata_buffer_;
+}
+
+LocalChannelHandle DisplaySurfaceClient::CreateVideoMeshSurface() {
+ auto status = InvokeRemoteMethod<DisplayRPC::CreateVideoMeshSurface>();
+ if (!status) {
+ ALOGE(
+ "DisplaySurfaceClient::CreateVideoMeshSurface: Failed to create "
+ "video mesh surface: %s",
+ status.GetErrorMessage().c_str());
+ }
+ return status.take();
+}
+
+DisplayClient::DisplayClient(int* error)
+ : BASE(pdx::default_transport::ClientChannelFactory::Create(
+ DisplayRPC::kClientPath),
+ kInfiniteTimeout) {
+ if (error)
+ *error = Client::error();
+}
+
+int DisplayClient::GetDisplayMetrics(SystemDisplayMetrics* metrics) {
+ auto status = InvokeRemoteMethod<DisplayRPC::GetMetrics>();
+ if (!status) {
+ ALOGE("DisplayClient::GetDisplayMetrics: Failed to get metrics: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ *metrics = status.get();
+ return 0;
+}
+
+pdx::Status<void> DisplayClient::SetViewerParams(const ViewerParams& viewer_params) {
+ auto status = InvokeRemoteMethod<DisplayRPC::SetViewerParams>(viewer_params);
+ if (!status) {
+ ALOGE("DisplayClient::SetViewerParams: Failed to set viewer params: %s",
+ status.GetErrorMessage().c_str());
+ }
+ return status;
+}
+
+int DisplayClient::GetLastFrameEdsTransform(LateLatchOutput* ll_out) {
+ auto status = InvokeRemoteMethod<DisplayRPC::GetEdsCapture>();
+ if (!status) {
+ ALOGE(
+ "DisplayClient::GetLastFrameLateLatch: Failed to get most recent late"
+ " latch: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ if (status.get().size() != sizeof(LateLatchOutput)) {
+ ALOGE(
+ "DisplayClient::GetLastFrameLateLatch: Error expected to receive %zu "
+ "bytes but received %zu",
+ sizeof(LateLatchOutput), status.get().size());
+ return -EIO;
+ }
+
+ *ll_out = *reinterpret_cast<const LateLatchOutput*>(status.get().data());
+ return 0;
+}
+
+int DisplayClient::EnterVrMode() {
+ auto status = InvokeRemoteMethod<DisplayRPC::EnterVrMode>();
+ if (!status) {
+ ALOGE(
+ "DisplayClient::EnterVrMode: Failed to set display service to Vr mode");
+ return -status.error();
+ }
+
+ return 0;
+}
+
+int DisplayClient::ExitVrMode() {
+ auto status = InvokeRemoteMethod<DisplayRPC::ExitVrMode>();
+ if (!status) {
+ ALOGE(
+ "DisplayClient::ExitVrMode: Failed to revert display service from Vr "
+ "mode");
+ return -status.error();
+ }
+
+ return 0;
+}
+
+std::unique_ptr<DisplaySurfaceClient> DisplayClient::CreateDisplaySurface(
+ int width, int height, int format, int usage, int flags) {
+ return DisplaySurfaceClient::Create(width, height, format, usage, flags);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/display_manager_client.cpp b/libs/vr/libdisplay/display_manager_client.cpp
new file mode 100644
index 0000000..f454b08
--- /dev/null
+++ b/libs/vr/libdisplay/display_manager_client.cpp
@@ -0,0 +1,111 @@
+#include "include/private/dvr/display_manager_client.h"
+
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_manager_client_impl.h>
+
+using android::dvr::DisplaySurfaceAttributeEnum;
+
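+// Thin C ABI wrappers around the C++ DisplayManagerClient and its surface
+// list and buffer containers.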
+extern "C" {
+
+struct DvrDisplayManagerClient {
+ DvrDisplayManagerClient()
+ : client(android::dvr::DisplayManagerClient::Create()) {}
+ ~DvrDisplayManagerClient() {}
+
+ std::unique_ptr<android::dvr::DisplayManagerClient> client;
+};
+
+struct DvrDisplayManagerClientSurfaceList {
+ DvrDisplayManagerClientSurfaceList(
+ std::vector<android::dvr::DisplaySurfaceInfo> surface_list)
+ : list(std::move(surface_list)) {}
+ ~DvrDisplayManagerClientSurfaceList() {}
+
+ std::vector<android::dvr::DisplaySurfaceInfo> list;
+};
+
+struct DvrDisplayManagerClientSurfaceBuffers {
+ DvrDisplayManagerClientSurfaceBuffers(
+ std::vector<std::unique_ptr<android::dvr::BufferConsumer>> buffer_list)
+ : list(std::move(buffer_list)) {}
+ ~DvrDisplayManagerClientSurfaceBuffers() {}
+
+ std::vector<std::unique_ptr<android::dvr::BufferConsumer>> list;
+};
+
+DvrDisplayManagerClient* dvrDisplayManagerClientCreate() {
+ return new DvrDisplayManagerClient();
+}
+
+void dvrDisplayManagerClientDestroy(DvrDisplayManagerClient* client) {
+ delete client;
+}
+
+int dvrDisplayManagerClientGetSurfaceList(
+ DvrDisplayManagerClient* client,
+ DvrDisplayManagerClientSurfaceList** surface_list) {
+ std::vector<android::dvr::DisplaySurfaceInfo> list;
+ int ret = client->client->GetSurfaceList(&list);
+ if (ret < 0)
+ return ret;
+
+ *surface_list = new DvrDisplayManagerClientSurfaceList(std::move(list));
+ return ret;
+}
+
+void dvrDisplayManagerClientSurfaceListDestroy(
+ DvrDisplayManagerClientSurfaceList* surface_list) {
+ delete surface_list;
+}
+
+size_t dvrDisplayManagerClientSurfaceListGetSize(
+ DvrDisplayManagerClientSurfaceList* surface_list) {
+ return surface_list->list.size();
+}
+
+int dvrDisplayManagerClientSurfaceListGetSurfaceId(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index) {
+ return surface_list->list[index].surface_id;
+}
+
+int dvrDisplayManagerClientSurfaceListGetClientZOrder(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index) {
+ return surface_list->list[index].ClientZOrder();
+}
+
+bool dvrDisplayManagerClientSurfaceListGetClientIsVisible(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index) {
+ return surface_list->list[index].IsClientVisible();
+}
+
+int dvrDisplayManagerClientGetSurfaceBuffers(
+ DvrDisplayManagerClient* client, int surface_id,
+ DvrDisplayManagerClientSurfaceBuffers** surface_buffers) {
+ std::vector<std::unique_ptr<android::dvr::BufferConsumer>> buffer_list;
+ int ret = client->client->GetSurfaceBuffers(surface_id, &buffer_list);
+ if (ret < 0)
+ return ret;
+
+ *surface_buffers =
+ new DvrDisplayManagerClientSurfaceBuffers(std::move(buffer_list));
+ return ret;
+}
+
+void dvrDisplayManagerClientSurfaceBuffersDestroy(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers) {
+ delete surface_buffers;
+}
+
+size_t dvrDisplayManagerClientSurfaceBuffersGetSize(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers) {
+ return surface_buffers->list.size();
+}
+
+int dvrDisplayManagerClientSurfaceBuffersGetFd(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers, size_t index) {
+ return surface_buffers->list[index]->event_fd();
+}
+
+} // extern "C"
diff --git a/libs/vr/libdisplay/display_manager_client_impl.cpp b/libs/vr/libdisplay/display_manager_client_impl.cpp
new file mode 100644
index 0000000..82198b9
--- /dev/null
+++ b/libs/vr/libdisplay/display_manager_client_impl.cpp
@@ -0,0 +1,57 @@
+#include "include/private/dvr/display_manager_client_impl.h"
+
+#include <pdx/default_transport/client_channel_factory.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+#include <utils/Log.h>
+
+using android::pdx::LocalChannelHandle;
+using android::pdx::Transaction;
+
+namespace android {
+namespace dvr {
+
+DisplayManagerClient::DisplayManagerClient()
+ : BASE(pdx::default_transport::ClientChannelFactory::Create(
+ DisplayManagerRPC::kClientPath)) {}
+
+DisplayManagerClient::~DisplayManagerClient() {}
+
+int DisplayManagerClient::GetSurfaceList(
+ std::vector<DisplaySurfaceInfo>* surface_list) {
+ auto status = InvokeRemoteMethod<DisplayManagerRPC::GetSurfaceList>();
+ if (!status) {
+ ALOGE(
+ "DisplayManagerClient::GetSurfaceList: Failed to get surface info: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ *surface_list = status.take();
+ return 0;
+}
+
+int DisplayManagerClient::GetSurfaceBuffers(
+ int surface_id, std::vector<std::unique_ptr<BufferConsumer>>* consumers) {
+ auto status =
+ InvokeRemoteMethod<DisplayManagerRPC::GetSurfaceBuffers>(surface_id);
+ if (!status) {
+ ALOGE(
+ "DisplayManagerClient::GetSurfaceBuffers: Failed to get buffers for "
+ "surface_id=%d: %s",
+ surface_id, status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ std::vector<std::unique_ptr<BufferConsumer>> consumer_buffers;
+ std::vector<LocalChannelHandle> channel_handles = status.take();
+ for (auto&& handle : channel_handles) {
+ consumer_buffers.push_back(BufferConsumer::Import(std::move(handle)));
+ }
+
+ *consumers = std::move(consumer_buffers);
+ return 0;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/display_rpc.cpp b/libs/vr/libdisplay/display_rpc.cpp
new file mode 100644
index 0000000..f5693bd
--- /dev/null
+++ b/libs/vr/libdisplay/display_rpc.cpp
@@ -0,0 +1,12 @@
+#include "include/private/dvr/display_rpc.h"
+
+namespace android {
+namespace dvr {
+
+constexpr char DisplayRPC::kClientPath[];
+constexpr char DisplayManagerRPC::kClientPath[];
+constexpr char DisplayScreenshotRPC::kClientPath[];
+constexpr char DisplayVSyncRPC::kClientPath[];
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/dummy_native_window.cpp b/libs/vr/libdisplay/dummy_native_window.cpp
new file mode 100644
index 0000000..5547f53
--- /dev/null
+++ b/libs/vr/libdisplay/dummy_native_window.cpp
@@ -0,0 +1,75 @@
+#include "include/private/dvr/dummy_native_window.h"
+
+#include <utils/Errors.h>
+
+namespace {
+// Dummy functions required for an ANativeWindow implementation.
+int F1(struct ANativeWindow*, int) { return 0; }
+int F2(struct ANativeWindow*, struct ANativeWindowBuffer**) { return 0; }
+int F3(struct ANativeWindow*, struct ANativeWindowBuffer*) { return 0; }
+int F4(struct ANativeWindow*, struct ANativeWindowBuffer**, int*) { return 0; }
+int F5(struct ANativeWindow*, struct ANativeWindowBuffer*, int) { return 0; }
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+DummyNativeWindow::DummyNativeWindow() {
+ ANativeWindow::setSwapInterval = F1;
+ ANativeWindow::dequeueBuffer = F4;
+ ANativeWindow::cancelBuffer = F5;
+ ANativeWindow::queueBuffer = F5;
+ ANativeWindow::query = Query;
+ ANativeWindow::perform = Perform;
+
+ ANativeWindow::dequeueBuffer_DEPRECATED = F2;
+ ANativeWindow::cancelBuffer_DEPRECATED = F3;
+ ANativeWindow::lockBuffer_DEPRECATED = F3;
+ ANativeWindow::queueBuffer_DEPRECATED = F3;
+}
+
+int DummyNativeWindow::Query(const ANativeWindow*, int what, int* value) {
+ switch (what) {
+ case NATIVE_WINDOW_WIDTH:
+ case NATIVE_WINDOW_HEIGHT:
+ case NATIVE_WINDOW_FORMAT:
+ case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
+ case NATIVE_WINDOW_CONCRETE_TYPE:
+ case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER:
+ case NATIVE_WINDOW_DEFAULT_WIDTH:
+ case NATIVE_WINDOW_DEFAULT_HEIGHT:
+ case NATIVE_WINDOW_TRANSFORM_HINT:
+ *value = 0;
+ return NO_ERROR;
+ }
+
+ *value = 0;
+ return BAD_VALUE;
+}
+
+int DummyNativeWindow::Perform(ANativeWindow*, int operation, ...) {
+ switch (operation) {
+ case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
+ case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
+ case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
+ case NATIVE_WINDOW_SET_USAGE:
+ case NATIVE_WINDOW_CONNECT:
+ case NATIVE_WINDOW_DISCONNECT:
+ case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
+ case NATIVE_WINDOW_API_CONNECT:
+ case NATIVE_WINDOW_API_DISCONNECT:
+ case NATIVE_WINDOW_SET_BUFFER_COUNT:
+ case NATIVE_WINDOW_SET_BUFFERS_DATASPACE:
+ case NATIVE_WINDOW_SET_SCALING_MODE:
+ return NO_ERROR;
+ case NATIVE_WINDOW_LOCK:
+ case NATIVE_WINDOW_UNLOCK_AND_POST:
+ case NATIVE_WINDOW_SET_CROP:
+ case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
+ return INVALID_OPERATION;
+ }
+ return NAME_NOT_FOUND;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/frame_history.cpp b/libs/vr/libdisplay/frame_history.cpp
new file mode 100644
index 0000000..67e4a09
--- /dev/null
+++ b/libs/vr/libdisplay/frame_history.cpp
@@ -0,0 +1,151 @@
+#include <private/dvr/frame_history.h>
+
+#include <cutils/log.h>
+#include <errno.h>
+#include <sync/sync.h>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/sync_util.h>
+
+using android::pdx::LocalHandle;
+
+constexpr int kNumFramesToUseForSchedulePrediction = 10;
+constexpr int kDefaultVsyncIntervalPrediction = 1;
+constexpr int kMaxVsyncIntervalPrediction = 4;
+constexpr int kDefaultPendingFrameBufferSize = 10;
+
+namespace android {
+namespace dvr {
+
+FrameHistory::PendingFrame::PendingFrame()
+ : start_ns(0), scheduled_vsync(0), scheduled_finish_ns(0) {}
+
+FrameHistory::PendingFrame::PendingFrame(int64_t start_ns,
+ uint32_t scheduled_vsync,
+ int64_t scheduled_finish_ns,
+ LocalHandle&& fence)
+ : start_ns(start_ns), scheduled_vsync(scheduled_vsync),
+ scheduled_finish_ns(scheduled_finish_ns), fence(std::move(fence)) {}
+
+FrameHistory::FrameHistory() : FrameHistory(kDefaultPendingFrameBufferSize) {}
+
+FrameHistory::FrameHistory(int pending_frame_buffer_size)
+ : pending_frames_(pending_frame_buffer_size),
+ finished_frames_(pending_frame_buffer_size),
+ frame_duration_history_(kNumFramesToUseForSchedulePrediction) {}
+
+void FrameHistory::Reset(int pending_frame_buffer_size) {
+ pending_frames_.Reset(pending_frame_buffer_size);
+ finished_frames_.Reset(pending_frame_buffer_size);
+ frame_duration_history_.Clear();
+}
+
+void FrameHistory::OnFrameStart(uint32_t scheduled_vsync,
+ int64_t scheduled_finish_ns) {
+ if (!pending_frames_.IsEmpty() && !pending_frames_.Back().fence) {
+ // If we don't have a fence set for the previous frame it's because
+ // OnFrameStart() was called twice in a row with no OnFrameSubmit() call. In
+ // that case throw out the pending frame data for the last frame.
+ pending_frames_.PopBack();
+ }
+
+ if (pending_frames_.IsFull()) {
+ ALOGW("Pending frames buffer is full. Discarding pending frame data.");
+ }
+
+ pending_frames_.Append(PendingFrame(GetSystemClockNs(), scheduled_vsync,
+ scheduled_finish_ns, LocalHandle()));
+}
+
+void FrameHistory::OnFrameSubmit(LocalHandle&& fence) {
+ // Add the fence to the previous frame data in pending_frames so we can
+ // track when it finishes.
+ if (!pending_frames_.IsEmpty() && !pending_frames_.Back().fence) {
+ if (fence && pending_frames_.Back().scheduled_vsync != UINT32_MAX)
+ pending_frames_.Back().fence = std::move(fence);
+ else
+ pending_frames_.PopBack();
+ }
+}
+
+void FrameHistory::CheckForFinishedFrames() {
+ if (pending_frames_.IsEmpty())
+ return;
+
+ android::dvr::FenceInfoBuffer fence_info_buffer;
+ while (!pending_frames_.IsEmpty()) {
+ const auto& pending_frame = pending_frames_.Front();
+ if (!pending_frame.fence) {
+      // The frame hasn't been submitted yet, so there's nothing more to do.
+ break;
+ }
+
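+    // Poll the fence with a zero timeout so this loop never blocks; ETIME
+    // below just means the frame hasn't finished yet.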
+ int64_t fence_signaled_time = -1;
+ int fence = pending_frame.fence.Get();
+ int sync_result = sync_wait(fence, 0);
+ if (sync_result == 0) {
+ int fence_signaled_result =
+ GetFenceSignaledTimestamp(fence, &fence_info_buffer,
+ &fence_signaled_time);
+ if (fence_signaled_result < 0) {
+ ALOGE("Failed getting signaled timestamp from fence");
+ } else {
+ // The frame is finished. Record the duration and move the frame data
+ // from pending_frames_ to finished_frames_.
+ DvrFrameScheduleResult schedule_result = {};
+ schedule_result.vsync_count = pending_frame.scheduled_vsync;
+ schedule_result.scheduled_frame_finish_ns =
+ pending_frame.scheduled_finish_ns;
+ schedule_result.frame_finish_offset_ns =
+ fence_signaled_time - pending_frame.scheduled_finish_ns;
+ finished_frames_.Append(schedule_result);
+ frame_duration_history_.Append(
+ fence_signaled_time - pending_frame.start_ns);
+ }
+ pending_frames_.PopFront();
+ } else {
+ if (errno != ETIME) {
+ ALOGE("sync_wait on frame fence failed. fence=%d errno=%d (%s).",
+ fence, errno, strerror(errno));
+ }
+ break;
+ }
+ }
+}
+
+int FrameHistory::PredictNextFrameVsyncInterval(int64_t vsync_period_ns) const {
+ if (frame_duration_history_.IsEmpty())
+ return kDefaultVsyncIntervalPrediction;
+
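+  // Estimate the next frame's duration from the recent average, converted to
+  // vsync intervals and clamped to kMaxVsyncIntervalPrediction.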
+ double total = 0;
+ for (size_t i = 0; i < frame_duration_history_.GetSize(); ++i)
+ total += frame_duration_history_.Get(i);
+ double avg_duration = total / frame_duration_history_.GetSize();
+
+ return std::min(kMaxVsyncIntervalPrediction,
+ static_cast<int>(avg_duration / vsync_period_ns) + 1);
+}
+
+int FrameHistory::GetPreviousFrameResults(DvrFrameScheduleResult* results,
+ int in_result_count) {
+ int out_result_count =
+ std::min(in_result_count, static_cast<int>(finished_frames_.GetSize()));
+ for (int i = 0; i < out_result_count; ++i) {
+ results[i] = finished_frames_.Get(0);
+ finished_frames_.PopFront();
+ }
+ return out_result_count;
+}
+
+uint32_t FrameHistory::GetCurrentFrameVsync() const {
+ return pending_frames_.IsEmpty() ?
+ UINT32_MAX : pending_frames_.Back().scheduled_vsync;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/gl_fenced_flush.cpp b/libs/vr/libdisplay/gl_fenced_flush.cpp
new file mode 100644
index 0000000..64b2e99
--- /dev/null
+++ b/libs/vr/libdisplay/gl_fenced_flush.cpp
@@ -0,0 +1,41 @@
+#include "include/private/dvr/gl_fenced_flush.h"
+
+#include <EGL/eglext.h>
+#include <GLES3/gl31.h>
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <utils/Trace.h>
+
+#include <base/logging.h>
+
+using android::pdx::LocalHandle;
+
+namespace android {
+namespace dvr {
+
+LocalHandle CreateGLSyncAndFlush(EGLDisplay display) {
+ ATRACE_NAME("CreateGLSyncAndFlush");
+
+ EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
+ EGL_NO_NATIVE_FENCE_FD_ANDROID, EGL_NONE};
+ EGLSyncKHR sync_point =
+ eglCreateSyncKHR(display, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
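+  // The native fence fd isn't created until the sync object is flushed to
+  // the GPU, so flush before duplicating the fd below.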
+ glFlush();
+ if (sync_point == EGL_NO_SYNC_KHR) {
+ LOG(ERROR) << "sync_point == EGL_NO_SYNC_KHR";
+ return LocalHandle();
+ }
+ EGLint fence_fd = eglDupNativeFenceFDANDROID(display, sync_point);
+ eglDestroySyncKHR(display, sync_point);
+
+ if (fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
+ LOG(ERROR) << "fence_fd == EGL_NO_NATIVE_FENCE_FD_ANDROID";
+ return LocalHandle();
+ }
+ return LocalHandle(fence_fd);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/graphics.cpp b/libs/vr/libdisplay/graphics.cpp
new file mode 100644
index 0000000..d599616
--- /dev/null
+++ b/libs/vr/libdisplay/graphics.cpp
@@ -0,0 +1,1591 @@
+#include <dvr/graphics.h>
+
+#include <sys/timerfd.h>
+#include <array>
+#include <vector>
+
+#include <cutils/log.h>
+#include <utils/Trace.h>
+
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR 1
+#endif
+#include <vulkan/vulkan.h>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/debug.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/frame_history.h>
+#include <private/dvr/gl_fenced_flush.h>
+#include <private/dvr/graphics/vr_gl_extensions.h>
+#include <private/dvr/graphics_private.h>
+#include <private/dvr/late_latch.h>
+#include <private/dvr/native_buffer_queue.h>
+#include <private/dvr/sensor_constants.h>
+#include <private/dvr/video_mesh_surface_client.h>
+#include <private/dvr/vsync_client.h>
+
+#include <android/native_window.h>
+
+#ifndef EGL_CONTEXT_MAJOR_VERSION
+#define EGL_CONTEXT_MAJOR_VERSION 0x3098
+#define EGL_CONTEXT_MINOR_VERSION 0x30FB
+#endif
+
+using android::pdx::LocalHandle;
+using android::pdx::LocalChannelHandle;
+
+using android::dvr::DisplaySurfaceAttributeEnum;
+using android::dvr::DisplaySurfaceAttributeValue;
+
+namespace {
+
+constexpr int kDefaultDisplaySurfaceUsage =
+ GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
+constexpr int kDefaultDisplaySurfaceFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+// TODO(alexst): revisit this count when HW encode is available for casting.
+constexpr int kDefaultBufferCount = 4;
+
+// Use with dvrBeginRenderFrame to disable EDS for the current frame.
+constexpr float32x4_t DVR_POSE_NO_EDS = {10.0f, 0.0f, 0.0f, 0.0f};
+
+// Use with dvrBeginRenderFrame to indicate that GPU late-latching is being used
+// for determining the render pose.
+constexpr float32x4_t DVR_POSE_LATE_LATCH = {20.0f, 0.0f, 0.0f, 0.0f};
+
+#ifndef NDEBUG
+
+static const char* GetGlCallbackType(GLenum type) {
+ switch (type) {
+ case GL_DEBUG_TYPE_ERROR_KHR:
+ return "ERROR";
+ case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR:
+ return "DEPRECATED_BEHAVIOR";
+ case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR:
+ return "UNDEFINED_BEHAVIOR";
+ case GL_DEBUG_TYPE_PORTABILITY_KHR:
+ return "PORTABILITY";
+ case GL_DEBUG_TYPE_PERFORMANCE_KHR:
+ return "PERFORMANCE";
+ case GL_DEBUG_TYPE_OTHER_KHR:
+ return "OTHER";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void on_gl_error(GLenum /*source*/, GLenum type, GLuint /*id*/,
+ GLenum severity, GLsizei /*length*/,
+ const char* message, const void* /*user_param*/) {
+ char msg[400];
+  snprintf(msg, sizeof(msg), "[" __FILE__ ":%d] GL %s: %s", __LINE__,
+ GetGlCallbackType(type), message);
+ switch (severity) {
+ case GL_DEBUG_SEVERITY_LOW_KHR:
+ ALOGI("%s", msg);
+ break;
+ case GL_DEBUG_SEVERITY_MEDIUM_KHR:
+ ALOGW("%s", msg);
+ break;
+ case GL_DEBUG_SEVERITY_HIGH_KHR:
+ ALOGE("%s", msg);
+ break;
+ }
+ fprintf(stderr, "%s\n", msg);
+}
+
+#endif
+
+int DvrToHalSurfaceFormat(int dvr_surface_format) {
+ switch (dvr_surface_format) {
+ case DVR_SURFACE_FORMAT_RGBA_8888:
+ return HAL_PIXEL_FORMAT_RGBA_8888;
+ case DVR_SURFACE_FORMAT_RGB_565:
+ return HAL_PIXEL_FORMAT_RGB_565;
+ default:
+ return HAL_PIXEL_FORMAT_RGBA_8888;
+ }
+}
+
+int SelectEGLConfig(EGLDisplay dpy, EGLint* attr, unsigned format,
+ EGLConfig* config) {
+ std::array<EGLint, 4> desired_rgba;
+ switch (format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ desired_rgba = {{8, 8, 8, 8}};
+ break;
+ case HAL_PIXEL_FORMAT_RGB_565:
+ desired_rgba = {{5, 6, 5, 0}};
+ break;
+ default:
+ ALOGE("Unsupported framebuffer pixel format %d", format);
+ return -1;
+ }
+
+ EGLint max_configs = 0;
+ if (eglGetConfigs(dpy, NULL, 0, &max_configs) == EGL_FALSE) {
+ ALOGE("No EGL configurations available?!");
+ return -1;
+ }
+
+ std::vector<EGLConfig> configs(max_configs);
+
+ EGLint num_configs;
+ if (eglChooseConfig(dpy, attr, &configs[0], max_configs, &num_configs) ==
+ EGL_FALSE) {
+ ALOGE("eglChooseConfig failed");
+ return -1;
+ }
+
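+  // The attribute list doesn't constrain the channel sizes, so scan the
+  // matching configs for an exact RGBA bit-depth match.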
+ std::array<EGLint, 4> config_rgba;
+ for (int i = 0; i < num_configs; i++) {
+ eglGetConfigAttrib(dpy, configs[i], EGL_RED_SIZE, &config_rgba[0]);
+ eglGetConfigAttrib(dpy, configs[i], EGL_GREEN_SIZE, &config_rgba[1]);
+ eglGetConfigAttrib(dpy, configs[i], EGL_BLUE_SIZE, &config_rgba[2]);
+ eglGetConfigAttrib(dpy, configs[i], EGL_ALPHA_SIZE, &config_rgba[3]);
+ if (config_rgba == desired_rgba) {
+ *config = configs[i];
+ return 0;
+ }
+ }
+
+ ALOGE("Cannot find a matching EGL config");
+ return -1;
+}
+
+void DestroyEglContext(EGLDisplay egl_display, EGLContext* egl_context) {
+ if (*egl_context != EGL_NO_CONTEXT) {
+ eglDestroyContext(egl_display, *egl_context);
+ *egl_context = EGL_NO_CONTEXT;
+ }
+}
+
+// Perform internal initialization. A GL context must be bound to the current
+// thread.
+// @param internally_created_context True if we created and own the GL context,
+// false if it was supplied by the application.
+// @return 0 if init was successful, or a negative error code on failure.
+int InitGl(bool internally_created_context) {
+ EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (egl_display == EGL_NO_DISPLAY) {
+ ALOGE("eglGetDisplay failed");
+ return -EINVAL;
+ }
+
+ EGLContext egl_context = eglGetCurrentContext();
+ if (egl_context == EGL_NO_CONTEXT) {
+ ALOGE("No GL context bound");
+ return -EINVAL;
+ }
+
+ glGetError(); // Clear the error state
+ GLint major_version, minor_version;
+ glGetIntegerv(GL_MAJOR_VERSION, &major_version);
+ glGetIntegerv(GL_MINOR_VERSION, &minor_version);
+ if (glGetError() != GL_NO_ERROR) {
+ // GL_MAJOR_VERSION and GL_MINOR_VERSION were added in GLES 3. If we get an
+ // error querying them it's almost certainly because it's GLES 1 or 2.
+ ALOGE("Error getting GL version. Must be GLES 3.2 or greater.");
+ return -EINVAL;
+ }
+
+ if (major_version < 3 || (major_version == 3 && minor_version < 2)) {
+ ALOGE("Invalid GL version: %d.%d. Must be GLES 3.2 or greater.",
+ major_version, minor_version);
+ return -EINVAL;
+ }
+
+#ifndef NDEBUG
+ if (internally_created_context) {
+ // Enable verbose GL debug output.
+ glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR);
+ glDebugMessageCallbackKHR(on_gl_error, NULL);
+ GLuint unused_ids = 0;
+ glDebugMessageControlKHR(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0,
+ &unused_ids, GL_TRUE);
+ }
+#else
+ (void)internally_created_context;
+#endif
+
+ load_gl_extensions();
+ return 0;
+}
+
+int CreateEglContext(EGLDisplay egl_display, DvrSurfaceParameter* parameters,
+ EGLContext* egl_context) {
+ *egl_context = EGL_NO_CONTEXT;
+
+ EGLint major, minor;
+ if (!eglInitialize(egl_display, &major, &minor)) {
+ ALOGE("Failed to initialize EGL");
+ return -ENXIO;
+ }
+
+ ALOGI("EGL version: %d.%d\n", major, minor);
+
+ int buffer_format = kDefaultDisplaySurfaceFormat;
+
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_FORMAT_IN:
+ buffer_format = DvrToHalSurfaceFormat(p->value);
+ break;
+ }
+ }
+
+ EGLint config_attrs[] = {EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE};
+ EGLConfig config = {0};
+
+ int ret = SelectEGLConfig(egl_display, config_attrs, buffer_format, &config);
+ if (ret < 0)
+ return ret;
+
+ ALOGI("EGL SelectEGLConfig ok.\n");
+
+ EGLint context_attrs[] = {EGL_CONTEXT_MAJOR_VERSION,
+ 3,
+ EGL_CONTEXT_MINOR_VERSION,
+ 2,
+#ifndef NDEBUG
+ EGL_CONTEXT_FLAGS_KHR,
+ EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR,
+#endif
+ EGL_NONE};
+
+ *egl_context =
+ eglCreateContext(egl_display, config, EGL_NO_CONTEXT, context_attrs);
+ if (*egl_context == EGL_NO_CONTEXT) {
+ ALOGE("eglCreateContext failed");
+ return -ENXIO;
+ }
+
+ ALOGI("eglCreateContext ok.\n");
+
+ if (!eglMakeCurrent(egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
+ *egl_context)) {
+ ALOGE("eglMakeCurrent failed");
+ DestroyEglContext(egl_display, egl_context);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+} // anonymous namespace
+
+// TODO(hendrikw): When we remove the calls to this in native_window.cpp, move
+// this back into the anonymous namespace
+std::shared_ptr<android::dvr::DisplaySurfaceClient> CreateDisplaySurfaceClient(
+ struct DvrSurfaceParameter* parameters,
+ /*out*/ android::dvr::SystemDisplayMetrics* metrics) {
+ auto client = android::dvr::DisplayClient::Create();
+ if (!client) {
+ ALOGE("Failed to create display client!");
+ return nullptr;
+ }
+
+ const int ret = client->GetDisplayMetrics(metrics);
+ if (ret < 0) {
+ ALOGE("Failed to get display metrics: %s", strerror(-ret));
+ return nullptr;
+ }
+
+ // Parameters that may be modified by the parameters array. Some of these are
+ // here for future expansion.
+ int request_width = -1;
+ int request_height = -1;
+ int request_flags = 0;
+ bool disable_distortion = false;
+ bool disable_stabilization = false;
+ bool disable_cac = false;
+ bool request_visible = true;
+ bool vertical_flip = false;
+ int request_z_order = 0;
+ bool request_exclude_from_blur = false;
+ bool request_blur_behind = true;
+ int request_format = kDefaultDisplaySurfaceFormat;
+ int request_usage = kDefaultDisplaySurfaceUsage;
+ int geometry_type = DVR_SURFACE_GEOMETRY_SINGLE;
+
+ // Handle parameter inputs.
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_WIDTH_IN:
+ request_width = p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_HEIGHT_IN:
+ request_height = p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_DISABLE_DISTORTION_IN:
+ disable_distortion = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_DISABLE_STABILIZATION_IN:
+ disable_stabilization = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_DISABLE_CAC_IN:
+ disable_cac = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_VISIBLE_IN:
+ request_visible = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_Z_ORDER_IN:
+ request_z_order = p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_EXCLUDE_FROM_BLUR_IN:
+ request_exclude_from_blur = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_BLUR_BEHIND_IN:
+ request_blur_behind = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_VERTICAL_FLIP_IN:
+ vertical_flip = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_GEOMETRY_IN:
+ geometry_type = p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_FORMAT_IN:
+ request_format = DvrToHalSurfaceFormat(p->value);
+ break;
+ case DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN:
+ case DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN:
+ case DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT:
+ case DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT:
+ case DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT:
+ case DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT:
+ case DVR_SURFACE_PARAMETER_INTER_LENS_METERS_OUT:
+ case DVR_SURFACE_PARAMETER_LEFT_FOV_LRBT_OUT:
+ case DVR_SURFACE_PARAMETER_RIGHT_FOV_LRBT_OUT:
+ case DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT:
+ case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT:
+ case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT:
+ case DVR_SURFACE_PARAMETER_GRAPHICS_API_IN:
+ case DVR_SURFACE_PARAMETER_VK_INSTANCE_IN:
+ case DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN:
+ case DVR_SURFACE_PARAMETER_VK_DEVICE_IN:
+ case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN:
+ case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN:
+ case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT:
+ case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT:
+ break;
+ default:
+ ALOGE("Invalid display surface parameter: key=%d value=%ld", p->key,
+ p->value);
+ return nullptr;
+ }
+ }
+
+ request_flags |= disable_distortion
+ ? DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION
+ : 0;
+ request_flags |=
+ disable_stabilization ? DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_EDS : 0;
+ request_flags |=
+ disable_cac ? DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC : 0;
+ request_flags |= vertical_flip ? DVR_DISPLAY_SURFACE_FLAGS_VERTICAL_FLIP : 0;
+ request_flags |= (geometry_type == DVR_SURFACE_GEOMETRY_SEPARATE_2)
+ ? DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2
+ : 0;
+
+ if (request_width == -1) {
+ request_width = disable_distortion ? metrics->display_native_width
+ : metrics->distorted_width;
+ if (!disable_distortion &&
+ geometry_type == DVR_SURFACE_GEOMETRY_SEPARATE_2) {
+ // The metrics always return the single wide buffer resolution.
+ // When split between eyes, we need to halve the width of the surface.
+ request_width /= 2;
+ }
+ }
+ if (request_height == -1) {
+ request_height = disable_distortion ? metrics->display_native_height
+ : metrics->distorted_height;
+ }
+
+ std::shared_ptr<android::dvr::DisplaySurfaceClient> surface =
+ client->CreateDisplaySurface(request_width, request_height,
+ request_format, request_usage,
+ request_flags);
+ surface->SetAttributes(
+ {{DisplaySurfaceAttributeEnum::Visible,
+ DisplaySurfaceAttributeValue{request_visible}},
+ {DisplaySurfaceAttributeEnum::ZOrder,
+ DisplaySurfaceAttributeValue{request_z_order}},
+ {DisplaySurfaceAttributeEnum::ExcludeFromBlur,
+ DisplaySurfaceAttributeValue{request_exclude_from_blur}},
+ {DisplaySurfaceAttributeEnum::BlurBehind,
+ DisplaySurfaceAttributeValue{request_blur_behind}}});
+
+ // Handle parameter output requests down here so we can return surface info.
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT:
+ *static_cast<int32_t*>(p->value_out) = metrics->display_native_width;
+ break;
+ case DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT:
+ *static_cast<int32_t*>(p->value_out) = metrics->display_native_height;
+ break;
+ case DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT:
+ *static_cast<int32_t*>(p->value_out) = surface->width();
+ break;
+ case DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT:
+ *static_cast<int32_t*>(p->value_out) = surface->height();
+ break;
+ case DVR_SURFACE_PARAMETER_INTER_LENS_METERS_OUT:
+ *static_cast<float*>(p->value_out) = metrics->inter_lens_distance_m;
+ break;
+ case DVR_SURFACE_PARAMETER_LEFT_FOV_LRBT_OUT:
+ for (int i = 0; i < 4; ++i) {
+ float* float_values_out = static_cast<float*>(p->value_out);
+ float_values_out[i] = metrics->left_fov_lrbt[i];
+ }
+ break;
+ case DVR_SURFACE_PARAMETER_RIGHT_FOV_LRBT_OUT:
+ for (int i = 0; i < 4; ++i) {
+ float* float_values_out = static_cast<float*>(p->value_out);
+ float_values_out[i] = metrics->right_fov_lrbt[i];
+ }
+ break;
+ case DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT:
+ *static_cast<uint64_t*>(p->value_out) = metrics->vsync_period_ns;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return surface;
+}
+
+extern "C" int dvrGetNativeDisplayDimensions(int* native_width,
+ int* native_height) {
+ int error = 0;
+ auto client = android::dvr::DisplayClient::Create(&error);
+ if (!client) {
+ ALOGE("Failed to create display client!");
+ return error;
+ }
+
+ android::dvr::SystemDisplayMetrics metrics;
+ const int ret = client->GetDisplayMetrics(&metrics);
+
+ if (ret != 0) {
+ ALOGE("Failed to get display metrics!");
+ return ret;
+ }
+
+ *native_width = static_cast<int>(metrics.display_native_width);
+ *native_height = static_cast<int>(metrics.display_native_height);
+ return 0;
+}
+
+extern "C" int dvrGetDisplaySurfaceInfo(EGLNativeWindowType win, int* width,
+ int* height, int* format) {
+ ANativeWindow* nwin = reinterpret_cast<ANativeWindow*>(win);
+ int w, h, f;
+
+ nwin->query(nwin, NATIVE_WINDOW_DEFAULT_WIDTH, &w);
+ nwin->query(nwin, NATIVE_WINDOW_DEFAULT_HEIGHT, &h);
+ nwin->query(nwin, NATIVE_WINDOW_FORMAT, &f);
+
+ if (width)
+ *width = w;
+ if (height)
+ *height = h;
+ if (format)
+ *format = f;
+
+ return 0;
+}
+
+struct DvrGraphicsContext : public android::ANativeObjectBase<
+ ANativeWindow, DvrGraphicsContext,
+ android::LightRefBase<DvrGraphicsContext>> {
+ public:
+ DvrGraphicsContext();
+ ~DvrGraphicsContext();
+
+ int graphics_api; // DVR_SURFACE_GRAPHICS_API_*
+
+ // GL specific members.
+ struct {
+ EGLDisplay egl_display;
+ EGLContext egl_context;
+ bool owns_egl_context;
+ GLuint texture_id[kSurfaceViewMaxCount];
+ int texture_count;
+ GLenum texture_target_type;
+ } gl;
+
+ // VK specific members
+ struct {
+ // These objects are passed in by the application, and are NOT owned
+ // by the context.
+ VkInstance instance;
+ VkPhysicalDevice physical_device;
+ VkDevice device;
+ VkQueue present_queue;
+ uint32_t present_queue_family;
+ const VkAllocationCallbacks* allocation_callbacks;
+ // These objects are owned by the context.
+ ANativeWindow* window;
+ VkSurfaceKHR surface;
+ VkSwapchainKHR swapchain;
+ std::vector<VkImage> swapchain_images;
+ std::vector<VkImageView> swapchain_image_views;
+ } vk;
+
+ // Display surface, metrics, and buffer management members.
+ std::shared_ptr<android::dvr::DisplaySurfaceClient> display_surface;
+ android::dvr::SystemDisplayMetrics display_metrics;
+ std::unique_ptr<android::dvr::NativeBufferQueue> buffer_queue;
+ android::dvr::NativeBufferProducer* current_buffer;
+ bool buffer_already_posted;
+
+ // Synchronization members.
+ std::unique_ptr<android::dvr::VSyncClient> vsync_client;
+ LocalHandle timerfd;
+
+ android::dvr::FrameHistory frame_history;
+
+  // Mapped surface metadata (i.e., for pose delivery with presented frames).
+ volatile android::dvr::DisplaySurfaceMetadata* surface_metadata;
+
+ // LateLatch support.
+ std::unique_ptr<android::dvr::LateLatch> late_latch;
+
+ // Video mesh support.
+ std::vector<std::shared_ptr<android::dvr::VideoMeshSurfaceClient>>
+ video_mesh_surfaces;
+
+ private:
+ // ANativeWindow function implementations
+ std::mutex lock_;
+ int Post(android::dvr::NativeBufferProducer* buffer, int fence_fd);
+ static int SetSwapInterval(ANativeWindow* window, int interval);
+ static int DequeueBuffer(ANativeWindow* window, ANativeWindowBuffer** buffer,
+ int* fence_fd);
+ static int QueueBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
+ int fence_fd);
+ static int CancelBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
+ int fence_fd);
+ static int Query(const ANativeWindow* window, int what, int* value);
+ static int Perform(ANativeWindow* window, int operation, ...);
+ static int DequeueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer** buffer);
+ static int CancelBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+ static int QueueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+ static int LockBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+
+ DISALLOW_COPY_AND_ASSIGN(DvrGraphicsContext);
+};
+
+DvrGraphicsContext::DvrGraphicsContext()
+ : graphics_api(DVR_GRAPHICS_API_GLES),
+ gl{},
+ vk{},
+ current_buffer(nullptr),
+ buffer_already_posted(false),
+ surface_metadata(nullptr) {
+ gl.egl_display = EGL_NO_DISPLAY;
+ gl.egl_context = EGL_NO_CONTEXT;
+ gl.owns_egl_context = true;
+ gl.texture_target_type = GL_TEXTURE_2D;
+
+ ANativeWindow::setSwapInterval = SetSwapInterval;
+ ANativeWindow::dequeueBuffer = DequeueBuffer;
+ ANativeWindow::cancelBuffer = CancelBuffer;
+ ANativeWindow::queueBuffer = QueueBuffer;
+ ANativeWindow::query = Query;
+ ANativeWindow::perform = Perform;
+
+ ANativeWindow::dequeueBuffer_DEPRECATED = DequeueBuffer_DEPRECATED;
+ ANativeWindow::cancelBuffer_DEPRECATED = CancelBuffer_DEPRECATED;
+ ANativeWindow::lockBuffer_DEPRECATED = LockBuffer_DEPRECATED;
+ ANativeWindow::queueBuffer_DEPRECATED = QueueBuffer_DEPRECATED;
+}
+
+DvrGraphicsContext::~DvrGraphicsContext() {
+ if (graphics_api == DVR_GRAPHICS_API_GLES) {
+ glDeleteTextures(gl.texture_count, gl.texture_id);
+ if (gl.owns_egl_context)
+ DestroyEglContext(gl.egl_display, &gl.egl_context);
+ } else if (graphics_api == DVR_GRAPHICS_API_VULKAN) {
+ if (vk.swapchain != VK_NULL_HANDLE) {
+ for (auto view : vk.swapchain_image_views) {
+ vkDestroyImageView(vk.device, view, vk.allocation_callbacks);
+ }
+ vkDestroySwapchainKHR(vk.device, vk.swapchain, vk.allocation_callbacks);
+ vkDestroySurfaceKHR(vk.instance, vk.surface, vk.allocation_callbacks);
+ delete vk.window;
+ }
+ }
+}
+
+int dvrGraphicsContextCreate(struct DvrSurfaceParameter* parameters,
+ DvrGraphicsContext** return_graphics_context) {
+ std::unique_ptr<DvrGraphicsContext> context(new DvrGraphicsContext);
+
+ // See whether we're using GL or Vulkan
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_GRAPHICS_API_IN:
+ context->graphics_api = p->value;
+ break;
+ }
+ }
+
+ if (context->graphics_api == DVR_GRAPHICS_API_GLES) {
+ context->gl.egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (context->gl.egl_display == EGL_NO_DISPLAY) {
+ ALOGE("eglGetDisplay failed");
+ return -ENXIO;
+ }
+
+ // See if we should create a GL context
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN:
+ context->gl.owns_egl_context = p->value != 0;
+ break;
+ }
+ }
+
+ if (context->gl.owns_egl_context) {
+ int ret = CreateEglContext(context->gl.egl_display, parameters,
+ &context->gl.egl_context);
+ if (ret < 0)
+ return ret;
+ } else {
+ context->gl.egl_context = eglGetCurrentContext();
+ }
+
+ int ret = InitGl(context->gl.owns_egl_context);
+ if (ret < 0)
+ return ret;
+ } else if (context->graphics_api == DVR_GRAPHICS_API_VULKAN) {
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_VK_INSTANCE_IN:
+ context->vk.instance = reinterpret_cast<VkInstance>(p->value);
+ break;
+ case DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN:
+ context->vk.physical_device =
+ reinterpret_cast<VkPhysicalDevice>(p->value);
+ break;
+ case DVR_SURFACE_PARAMETER_VK_DEVICE_IN:
+ context->vk.device = reinterpret_cast<VkDevice>(p->value);
+ break;
+ case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN:
+ context->vk.present_queue = reinterpret_cast<VkQueue>(p->value);
+ break;
+ case DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN:
+ context->vk.present_queue_family = static_cast<uint32_t>(p->value);
+ break;
+ }
+ }
+ } else {
+ ALOGE("Error: invalid graphics API type");
+ return -EINVAL;
+ }
+
+ context->display_surface =
+ CreateDisplaySurfaceClient(parameters, &context->display_metrics);
+ if (!context->display_surface) {
+ ALOGE("Error: failed to create display surface client");
+ return -ECOMM;
+ }
+
+ context->buffer_queue.reset(new android::dvr::NativeBufferQueue(
+ context->gl.egl_display, context->display_surface, kDefaultBufferCount));
+
+  // Because of the way the call sequence works, we need one more entry than
+  // the buffer queue capacity to store data for all pending frames.
+ context->frame_history.Reset(context->buffer_queue->GetQueueCapacity() + 1);
+
+ context->vsync_client = android::dvr::VSyncClient::Create();
+ if (!context->vsync_client) {
+ ALOGE("Error: failed to create vsync client");
+ return -ECOMM;
+ }
+
+ context->timerfd.Reset(timerfd_create(CLOCK_MONOTONIC, 0));
+ if (!context->timerfd) {
+ ALOGE("Error: timerfd_create failed because: %s", strerror(errno));
+ return -EPERM;
+ }
+
+ context->surface_metadata = context->display_surface->GetMetadataBufferPtr();
+ if (!context->surface_metadata) {
+ ALOGE("Error: surface metadata allocation failed");
+ return -ENOMEM;
+ }
+
+ ALOGI("buffer: %d x %d\n", context->display_surface->width(),
+ context->display_surface->height());
+
+ if (context->graphics_api == DVR_GRAPHICS_API_GLES) {
+ context->gl.texture_count = (context->display_surface->flags() &
+ DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2)
+ ? 2
+ : 1;
+
+ // Create the GL textures.
+ glGenTextures(context->gl.texture_count, context->gl.texture_id);
+
+ // We must make sure that we have at least one buffer allocated at this time
+ // so that anyone who tries to bind an FBO to context->texture_id
+ // will not get an incomplete buffer.
+ context->current_buffer = context->buffer_queue->Dequeue();
+ CHECK(context->gl.texture_count ==
+ context->current_buffer->buffer()->slice_count());
+ for (int i = 0; i < context->gl.texture_count; ++i) {
+ glBindTexture(context->gl.texture_target_type, context->gl.texture_id[i]);
+ glEGLImageTargetTexture2DOES(context->gl.texture_target_type,
+ context->current_buffer->image_khr(i));
+ }
+ glBindTexture(context->gl.texture_target_type, 0);
+ CHECK_GL();
+
+ bool is_late_latch = false;
+
+ // Pass back the texture target type and id.
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN:
+ is_late_latch = !!p->value;
+ break;
+ case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT:
+ *static_cast<GLenum*>(p->value_out) = context->gl.texture_target_type;
+ break;
+ case DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT:
+ for (int i = 0; i < context->gl.texture_count; ++i) {
+ *(static_cast<GLuint*>(p->value_out) + i) =
+ context->gl.texture_id[i];
+ }
+ break;
+ }
+ }
+
+ // Initialize late latch.
+ if (is_late_latch) {
+ LocalHandle fd;
+ int ret = context->display_surface->GetMetadataBufferFd(&fd);
+ if (ret == 0) {
+ context->late_latch.reset(
+ new android::dvr::LateLatch(true, std::move(fd)));
+ } else {
+ ALOGE("Error: failed to get surface metadata buffer fd for late latch");
+ }
+ }
+ } else if (context->graphics_api == DVR_GRAPHICS_API_VULKAN) {
+ VkResult result = VK_SUCCESS;
+ // Create a VkSurfaceKHR from the ANativeWindow.
+ VkAndroidSurfaceCreateInfoKHR android_surface_ci = {};
+ android_surface_ci.sType =
+ VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
+ android_surface_ci.window = context.get();
+ result = vkCreateAndroidSurfaceKHR(
+ context->vk.instance, &android_surface_ci,
+ context->vk.allocation_callbacks, &context->vk.surface);
+ CHECK_EQ(result, VK_SUCCESS);
+ VkBool32 surface_supports_present = VK_FALSE;
+ result = vkGetPhysicalDeviceSurfaceSupportKHR(
+ context->vk.physical_device, context->vk.present_queue_family,
+ context->vk.surface, &surface_supports_present);
+ CHECK_EQ(result, VK_SUCCESS);
+ if (!surface_supports_present) {
+ ALOGE("Error: provided queue family (%u) does not support presentation",
+ context->vk.present_queue_family);
+ return -EPERM;
+ }
+ VkSurfaceCapabilitiesKHR surface_capabilities = {};
+ result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ context->vk.physical_device, context->vk.surface,
+ &surface_capabilities);
+ CHECK_EQ(result, VK_SUCCESS);
+ // Determine the swapchain image format.
+ uint32_t device_surface_format_count = 0;
+ result = vkGetPhysicalDeviceSurfaceFormatsKHR(
+ context->vk.physical_device, context->vk.surface,
+ &device_surface_format_count, nullptr);
+ CHECK_EQ(result, VK_SUCCESS);
+ std::vector<VkSurfaceFormatKHR> device_surface_formats(
+ device_surface_format_count);
+ result = vkGetPhysicalDeviceSurfaceFormatsKHR(
+ context->vk.physical_device, context->vk.surface,
+ &device_surface_format_count, device_surface_formats.data());
+ CHECK_EQ(result, VK_SUCCESS);
+ CHECK_GT(device_surface_format_count, 0U);
+ CHECK_NE(device_surface_formats[0].format, VK_FORMAT_UNDEFINED);
+ VkSurfaceFormatKHR present_surface_format = device_surface_formats[0];
+ // Determine the swapchain present mode.
+ // TODO(cort): query device_present_modes to make sure MAILBOX is supported.
+ // But according to libvulkan, it is.
+ uint32_t device_present_mode_count = 0;
+ result = vkGetPhysicalDeviceSurfacePresentModesKHR(
+ context->vk.physical_device, context->vk.surface,
+ &device_present_mode_count, nullptr);
+ CHECK_EQ(result, VK_SUCCESS);
+ std::vector<VkPresentModeKHR> device_present_modes(
+ device_present_mode_count);
+ result = vkGetPhysicalDeviceSurfacePresentModesKHR(
+ context->vk.physical_device, context->vk.surface,
+ &device_present_mode_count, device_present_modes.data());
+ CHECK_EQ(result, VK_SUCCESS);
+ VkPresentModeKHR present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
+ // Extract presentation surface extents, image count, transform, usages,
+ // etc.
+ LOG_ASSERT(
+ static_cast<int>(surface_capabilities.currentExtent.width) != -1 &&
+ static_cast<int>(surface_capabilities.currentExtent.height) != -1);
+ VkExtent2D swapchain_extent = surface_capabilities.currentExtent;
+
+ uint32_t desired_image_count = surface_capabilities.minImageCount;
+ if (surface_capabilities.maxImageCount > 0 &&
+ desired_image_count > surface_capabilities.maxImageCount) {
+ desired_image_count = surface_capabilities.maxImageCount;
+ }
+ VkSurfaceTransformFlagBitsKHR surface_transform =
+ surface_capabilities.currentTransform;
+ VkImageUsageFlags image_usage_flags =
+ surface_capabilities.supportedUsageFlags;
+ CHECK_NE(surface_capabilities.supportedCompositeAlpha,
+ static_cast<VkFlags>(0));
+ VkCompositeAlphaFlagBitsKHR composite_alpha =
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ if (!(surface_capabilities.supportedCompositeAlpha &
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR)) {
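+      // Opaque isn't supported; fall back to the lowest supported composite
+      // alpha mode (x & -x isolates the least significant set bit).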
+ composite_alpha = VkCompositeAlphaFlagBitsKHR(
+ static_cast<int>(surface_capabilities.supportedCompositeAlpha) &
+ -static_cast<int>(surface_capabilities.supportedCompositeAlpha));
+ }
+ // Create VkSwapchainKHR
+ VkSwapchainCreateInfoKHR swapchain_ci = {};
+ swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ swapchain_ci.pNext = nullptr;
+ swapchain_ci.surface = context->vk.surface;
+ swapchain_ci.minImageCount = desired_image_count;
+ swapchain_ci.imageFormat = present_surface_format.format;
+ swapchain_ci.imageColorSpace = present_surface_format.colorSpace;
+ swapchain_ci.imageExtent.width = swapchain_extent.width;
+ swapchain_ci.imageExtent.height = swapchain_extent.height;
+ swapchain_ci.imageUsage = image_usage_flags;
+ swapchain_ci.preTransform = surface_transform;
+ swapchain_ci.compositeAlpha = composite_alpha;
+ swapchain_ci.imageArrayLayers = 1;
+ swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapchain_ci.queueFamilyIndexCount = 0;
+ swapchain_ci.pQueueFamilyIndices = nullptr;
+ swapchain_ci.presentMode = present_mode;
+ swapchain_ci.clipped = VK_TRUE;
+ swapchain_ci.oldSwapchain = VK_NULL_HANDLE;
+ result = vkCreateSwapchainKHR(context->vk.device, &swapchain_ci,
+ context->vk.allocation_callbacks,
+ &context->vk.swapchain);
+ CHECK_EQ(result, VK_SUCCESS);
+ // Create swapchain image views
+ uint32_t image_count = 0;
+ result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
+ &image_count, nullptr);
+ CHECK_EQ(result, VK_SUCCESS);
+ CHECK_GT(image_count, 0U);
+ context->vk.swapchain_images.resize(image_count);
+ result = vkGetSwapchainImagesKHR(context->vk.device, context->vk.swapchain,
+ &image_count,
+ context->vk.swapchain_images.data());
+ CHECK_EQ(result, VK_SUCCESS);
+ context->vk.swapchain_image_views.resize(image_count);
+ VkImageViewCreateInfo image_view_ci = {};
+ image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ image_view_ci.pNext = nullptr;
+ image_view_ci.flags = 0;
+ image_view_ci.format = swapchain_ci.imageFormat;
+ image_view_ci.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ image_view_ci.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ image_view_ci.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ image_view_ci.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+ image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ image_view_ci.subresourceRange.baseMipLevel = 0;
+ image_view_ci.subresourceRange.levelCount = 1;
+ image_view_ci.subresourceRange.baseArrayLayer = 0;
+ image_view_ci.subresourceRange.layerCount = 1;
+ image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ image_view_ci.image = VK_NULL_HANDLE; // filled in below
+ for (uint32_t i = 0; i < image_count; ++i) {
+ image_view_ci.image = context->vk.swapchain_images[i];
+ result = vkCreateImageView(context->vk.device, &image_view_ci,
+ context->vk.allocation_callbacks,
+ &context->vk.swapchain_image_views[i]);
+ CHECK_EQ(result, VK_SUCCESS);
+ }
+ // Fill in any requested output parameters.
+ for (auto p = parameters; p && p->key != DVR_SURFACE_PARAMETER_NONE; ++p) {
+ switch (p->key) {
+ case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT:
+ *static_cast<uint32_t*>(p->value_out) = image_count;
+ break;
+ case DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT:
+ *static_cast<VkFormat*>(p->value_out) = swapchain_ci.imageFormat;
+ break;
+ }
+ }
+ }
+
+ *return_graphics_context = context.release();
+ return 0;
+}
+
+void dvrGraphicsContextDestroy(DvrGraphicsContext* graphics_context) {
+ delete graphics_context;
+}
+
+// ANativeWindow function implementations. These should only be used
+// by the Vulkan path.
+int DvrGraphicsContext::Post(android::dvr::NativeBufferProducer* buffer,
+ int fence_fd) {
+ LOG_ASSERT(graphics_api == DVR_GRAPHICS_API_VULKAN);
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+ ALOGI_IF(TRACE, "DvrGraphicsContext::Post: buffer_id=%d, fence_fd=%d",
+ buffer->buffer()->id(), fence_fd);
+ ALOGW_IF(!display_surface->visible(),
+ "DvrGraphicsContext::Post: Posting buffer on invisible surface!!!");
+ // The NativeBufferProducer closes the fence fd, so dup it for tracking in the
+ // frame history.
+ frame_history.OnFrameSubmit(LocalHandle::AsDuplicate(fence_fd));
+ int result = buffer->Post(fence_fd, 0);
+ return result;
+}
+
+int DvrGraphicsContext::SetSwapInterval(ANativeWindow* window, int interval) {
+ ALOGI_IF(TRACE, "SetSwapInterval: window=%p interval=%d", window, interval);
+ DvrGraphicsContext* self = getSelf(window);
+  (void)self;  // Used only by the assert below; avoids an unused warning.
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ return android::NO_ERROR;
+}
+
+int DvrGraphicsContext::DequeueBuffer(ANativeWindow* window,
+ ANativeWindowBuffer** buffer,
+ int* fence_fd) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+
+ DvrGraphicsContext* self = getSelf(window);
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ if (!self->current_buffer) {
+ self->current_buffer = self->buffer_queue.get()->Dequeue();
+ }
+ ATRACE_ASYNC_BEGIN("BufferDraw", self->current_buffer->buffer()->id());
+ *fence_fd = self->current_buffer->ClaimReleaseFence().Release();
+ *buffer = self->current_buffer;
+
+ ALOGI_IF(TRACE, "DvrGraphicsContext::DequeueBuffer: fence_fd=%d", *fence_fd);
+ return android::NO_ERROR;
+}
+
+int DvrGraphicsContext::QueueBuffer(ANativeWindow* window,
+ ANativeWindowBuffer* buffer, int fence_fd) {
+ ATRACE_NAME("NativeWindow::QueueBuffer");
+ ALOGI_IF(TRACE, "NativeWindow::QueueBuffer: fence_fd=%d", fence_fd);
+
+ DvrGraphicsContext* self = getSelf(window);
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ android::dvr::NativeBufferProducer* native_buffer =
+ static_cast<android::dvr::NativeBufferProducer*>(buffer);
+ ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
+ bool do_post = true;
+ if (self->buffer_already_posted) {
+ // Check that the buffer is the one we expect, but handle it if this happens
+ // in production by allowing this buffer to post on top of the previous one.
+ DCHECK(native_buffer == self->current_buffer);
+ if (native_buffer == self->current_buffer) {
+ do_post = false;
+ if (fence_fd >= 0)
+ close(fence_fd);
+ }
+ }
+ if (do_post) {
+ ATRACE_ASYNC_BEGIN("BufferPost", native_buffer->buffer()->id());
+ self->Post(native_buffer, fence_fd);
+ }
+ self->buffer_already_posted = false;
+ self->current_buffer = nullptr;
+
+ return android::NO_ERROR;
+}
+
+int DvrGraphicsContext::CancelBuffer(ANativeWindow* window,
+ ANativeWindowBuffer* buffer,
+ int fence_fd) {
+ ATRACE_NAME("DvrGraphicsContext::CancelBuffer");
+ ALOGI_IF(TRACE, "DvrGraphicsContext::CancelBuffer: fence_fd: %d", fence_fd);
+
+ DvrGraphicsContext* self = getSelf(window);
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ android::dvr::NativeBufferProducer* native_buffer =
+ static_cast<android::dvr::NativeBufferProducer*>(buffer);
+ ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
+ ATRACE_INT("CancelBuffer", native_buffer->buffer()->id());
+ bool do_enqueue = true;
+ if (self->buffer_already_posted) {
+ // Check that the buffer is the one we expect, but handle it if this happens
+ // in production by returning this buffer to the buffer queue.
+ DCHECK(native_buffer == self->current_buffer);
+ if (native_buffer == self->current_buffer) {
+ do_enqueue = false;
+ }
+ }
+ if (do_enqueue) {
+ self->buffer_queue.get()->Enqueue(native_buffer);
+ }
+ if (fence_fd >= 0)
+ close(fence_fd);
+ self->buffer_already_posted = false;
+ self->current_buffer = nullptr;
+
+ return android::NO_ERROR;
+}
+
+int DvrGraphicsContext::Query(const ANativeWindow* window, int what,
+ int* value) {
+ DvrGraphicsContext* self = getSelf(const_cast<ANativeWindow*>(window));
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ switch (what) {
+ case NATIVE_WINDOW_WIDTH:
+ *value = self->display_surface->width();
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_HEIGHT:
+ *value = self->display_surface->height();
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_FORMAT:
+ *value = self->display_surface->format();
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
+ *value = 1;
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_CONCRETE_TYPE:
+ *value = NATIVE_WINDOW_SURFACE;
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER:
+ *value = 1;
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_DEFAULT_WIDTH:
+ *value = self->display_surface->width();
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_DEFAULT_HEIGHT:
+ *value = self->display_surface->height();
+ return android::NO_ERROR;
+ case NATIVE_WINDOW_TRANSFORM_HINT:
+ *value = 0;
+ return android::NO_ERROR;
+ }
+
+ *value = 0;
+ return android::BAD_VALUE;
+}
+
+int DvrGraphicsContext::Perform(ANativeWindow* window, int operation, ...) {
+ DvrGraphicsContext* self = getSelf(window);
+ LOG_ASSERT(self->graphics_api == DVR_GRAPHICS_API_VULKAN);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ va_list args;
+ va_start(args, operation);
+
+ // TODO(eieio): The following operations are not used at this time. They are
+ // included here to help document which operations may be useful and what
+ // parameters they take.
+ switch (operation) {
+ case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: {
+ int w = va_arg(args, int);
+ int h = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: w=%d h=%d", w, h);
+ return android::NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_BUFFERS_FORMAT: {
+ int format = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_FORMAT: format=%d", format);
+ return android::NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: {
+ int transform = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: transform=%d",
+ transform);
+ return android::NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_USAGE: {
+ int usage = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_USAGE: usage=%d", usage);
+ return android::NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_CONNECT:
+ case NATIVE_WINDOW_DISCONNECT:
+ case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
+ case NATIVE_WINDOW_API_CONNECT:
+ case NATIVE_WINDOW_API_DISCONNECT:
+ // TODO(eieio): we should implement these
+ return android::NO_ERROR;
+
+ case NATIVE_WINDOW_SET_BUFFER_COUNT: {
+ int buffer_count = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFER_COUNT: bufferCount=%d",
+ buffer_count);
+ return android::NO_ERROR;
+ }
+ case NATIVE_WINDOW_SET_BUFFERS_DATASPACE: {
+ android_dataspace_t data_space =
+ static_cast<android_dataspace_t>(va_arg(args, int));
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DATASPACE: dataSpace=%d",
+ data_space);
+ return android::NO_ERROR;
+ }
+ case NATIVE_WINDOW_SET_SCALING_MODE: {
+ int mode = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_SCALING_MODE: mode=%d", mode);
+ return android::NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_LOCK:
+ case NATIVE_WINDOW_UNLOCK_AND_POST:
+ case NATIVE_WINDOW_SET_CROP:
+ case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
+ return android::INVALID_OPERATION;
+ }
+
+ return android::NAME_NOT_FOUND;
+}
+
+int DvrGraphicsContext::DequeueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer** buffer) {
+ int fence_fd = -1;
+ int ret = DequeueBuffer(window, buffer, &fence_fd);
+
+  // The deprecated API has no way to return the fence, so close it here.
+  // Ideally the fence would be waited on before it is closed.
+ if (ret == android::NO_ERROR && fence_fd != -1)
+ close(fence_fd);
+
+ return ret;
+}
+
+int DvrGraphicsContext::CancelBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer) {
+ return CancelBuffer(window, buffer, -1);
+}
+
+int DvrGraphicsContext::QueueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer) {
+ return QueueBuffer(window, buffer, -1);
+}
+
+int DvrGraphicsContext::LockBuffer_DEPRECATED(ANativeWindow* /*window*/,
+ ANativeWindowBuffer* /*buffer*/) {
+ return android::NO_ERROR;
+}
+// End ANativeWindow implementation
+
+int dvrSetEdsPose(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation) {
+ ATRACE_NAME("dvrSetEdsPose");
+ if (!graphics_context->current_buffer) {
+ ALOGE("dvrBeginRenderFrame must be called before dvrSetEdsPose");
+ return -EPERM;
+ }
+
+ // When late-latching is enabled, the pose buffer is written by the GPU, so
+ // we don't touch it here.
+ float32x4_t is_late_latch = DVR_POSE_LATE_LATCH;
+ if (render_pose_orientation[0] != is_late_latch[0]) {
+ volatile android::dvr::DisplaySurfaceMetadata* data =
+ graphics_context->surface_metadata;
+ uint32_t buffer_index =
+ graphics_context->current_buffer->surface_buffer_index();
+ ALOGE_IF(TRACE, "write pose index %d %f %f", buffer_index,
+ render_pose_orientation[0], render_pose_orientation[1]);
+ data->orientation[buffer_index] = render_pose_orientation;
+ data->translation[buffer_index] = render_pose_translation;
+ }
+
+ return 0;
+}
+
+int dvrBeginRenderFrameEds(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation) {
+ ATRACE_NAME("dvrBeginRenderFrameEds");
+ LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+ CHECK_GL();
+ // Grab a buffer from the queue and set its pose.
+ if (!graphics_context->current_buffer) {
+ graphics_context->current_buffer =
+ graphics_context->buffer_queue->Dequeue();
+ }
+
+ int ret = dvrSetEdsPose(graphics_context, render_pose_orientation,
+ render_pose_translation);
+ if (ret < 0)
+ return ret;
+
+ ATRACE_ASYNC_BEGIN("BufferDraw",
+ graphics_context->current_buffer->buffer()->id());
+
+ {
+ ATRACE_NAME("glEGLImageTargetTexture2DOES");
+ // Bind the texture to the latest buffer in the queue.
+ for (int i = 0; i < graphics_context->gl.texture_count; ++i) {
+ glBindTexture(graphics_context->gl.texture_target_type,
+ graphics_context->gl.texture_id[i]);
+ glEGLImageTargetTexture2DOES(
+ graphics_context->gl.texture_target_type,
+ graphics_context->current_buffer->image_khr(i));
+ }
+ glBindTexture(graphics_context->gl.texture_target_type, 0);
+ }
+ CHECK_GL();
+ return 0;
+}
+
+int dvrBeginRenderFrameEdsVk(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation,
+ VkSemaphore acquire_semaphore,
+ VkFence acquire_fence,
+ uint32_t* swapchain_image_index,
+ VkImageView* swapchain_image_view) {
+ ATRACE_NAME("dvrBeginRenderFrameEds");
+ LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_VULKAN);
+
+ // Acquire a swapchain image. This calls Dequeue() internally.
+ VkResult result = vkAcquireNextImageKHR(
+ graphics_context->vk.device, graphics_context->vk.swapchain, UINT64_MAX,
+ acquire_semaphore, acquire_fence, swapchain_image_index);
+ if (result != VK_SUCCESS)
+ return -EINVAL;
+
+  // Set the EDS pose.
+ int ret = dvrSetEdsPose(graphics_context, render_pose_orientation,
+ render_pose_translation);
+ if (ret < 0)
+ return ret;
+ *swapchain_image_view =
+ graphics_context->vk.swapchain_image_views[*swapchain_image_index];
+ return 0;
+}
+
+int dvrBeginRenderFrame(DvrGraphicsContext* graphics_context) {
+ return dvrBeginRenderFrameEds(graphics_context, DVR_POSE_NO_EDS,
+ DVR_POSE_NO_EDS);
+}
+
+int dvrBeginRenderFrameVk(DvrGraphicsContext* graphics_context,
+ VkSemaphore acquire_semaphore, VkFence acquire_fence,
+ uint32_t* swapchain_image_index,
+ VkImageView* swapchain_image_view) {
+ return dvrBeginRenderFrameEdsVk(
+ graphics_context, DVR_POSE_NO_EDS, DVR_POSE_NO_EDS, acquire_semaphore,
+ acquire_fence, swapchain_image_index, swapchain_image_view);
+}
+
+int dvrBeginRenderFrameLateLatch(DvrGraphicsContext* graphics_context,
+ uint32_t /*flags*/,
+ uint32_t target_vsync_count, int num_views,
+ const float** projection_matrices,
+ const float** eye_from_head_matrices,
+ const float** pose_offset_matrices,
+ uint32_t* out_late_latch_buffer_id) {
+ if (!graphics_context->late_latch) {
+ return -EPERM;
+ }
+ if (num_views > DVR_GRAPHICS_SURFACE_MAX_VIEWS) {
+ LOG(ERROR) << "dvrBeginRenderFrameLateLatch called with too many views.";
+ return -EINVAL;
+ }
+ dvrBeginRenderFrameEds(graphics_context, DVR_POSE_LATE_LATCH,
+ DVR_POSE_LATE_LATCH);
+ auto& ll = graphics_context->late_latch;
+ // TODO(jbates) Need to change this shader so that it dumps the single
+ // captured pose for both eyes into the display surface metadata buffer at
+ // the right index.
+ android::dvr::LateLatchInput input;
+ memset(&input, 0, sizeof(input));
+ for (int i = 0; i < num_views; ++i) {
+ memcpy(input.proj_mat + i, *(projection_matrices + i), 16 * sizeof(float));
+ memcpy(input.eye_from_head_mat + i, *(eye_from_head_matrices + i),
+ 16 * sizeof(float));
+ memcpy(input.pose_offset + i, *(pose_offset_matrices + i),
+ 16 * sizeof(float));
+ }
+ input.pose_index =
+ target_vsync_count & android::dvr::kPoseAsyncBufferIndexMask;
+ input.render_pose_index =
+ graphics_context->current_buffer->surface_buffer_index();
+ ll->AddLateLatch(input);
+ *out_late_latch_buffer_id = ll->output_buffer_id();
+ return 0;
+}
+
+extern "C" int dvrGraphicsWaitNextFrame(
+ DvrGraphicsContext* graphics_context, int64_t start_delay_ns,
+ DvrFrameSchedule* out_next_frame_schedule) {
+ start_delay_ns = std::max(start_delay_ns, static_cast<int64_t>(0));
+
+ // We only do one-shot timers:
+ int64_t wake_time_ns = 0;
+
+ uint32_t current_frame_vsync;
+ int64_t current_frame_scheduled_finish_ns;
+ int64_t vsync_period_ns;
+
+  int fetch_schedule_result = graphics_context->vsync_client->GetSchedInfo(
+      &vsync_period_ns, &current_frame_scheduled_finish_ns,
+      &current_frame_vsync);
+ if (fetch_schedule_result == 0) {
+ wake_time_ns = current_frame_scheduled_finish_ns + start_delay_ns;
+ // If the last wakeup time is still in the future, use it instead to avoid
+ // major schedule jumps when applications call WaitNextFrame with
+ // aggressive offsets.
+ int64_t now = android::dvr::GetSystemClockNs();
+ if (android::dvr::TimestampGT(wake_time_ns - vsync_period_ns, now)) {
+ wake_time_ns -= vsync_period_ns;
+ --current_frame_vsync;
+ }
+ // If the next wakeup time is in the past, add a vsync period to keep the
+ // application on schedule.
+ if (android::dvr::TimestampLT(wake_time_ns, now)) {
+ wake_time_ns += vsync_period_ns;
+ ++current_frame_vsync;
+ }
+ } else {
+ ALOGE("Error getting frame schedule because: %s",
+ strerror(-fetch_schedule_result));
+ // Sleep for a vsync period to avoid cascading failure.
+ wake_time_ns = android::dvr::GetSystemClockNs() +
+ graphics_context->display_metrics.vsync_period_ns;
+ }
+
+  // Convert the absolute wake time to a timespec, with nsec adjusted to
+  // [0..999,999,999].
+ struct itimerspec wake_time;
+ wake_time.it_interval.tv_sec = 0;
+ wake_time.it_interval.tv_nsec = 0;
+ wake_time.it_value = android::dvr::NsToTimespec(wake_time_ns);
+ bool sleep_result =
+ timerfd_settime(graphics_context->timerfd.Get(), TFD_TIMER_ABSTIME,
+ &wake_time, nullptr) == 0;
+ if (sleep_result) {
+ ATRACE_NAME("sleep");
+ uint64_t expirations = 0;
+ sleep_result = read(graphics_context->timerfd.Get(), &expirations,
+ sizeof(uint64_t)) == sizeof(uint64_t);
+ if (!sleep_result) {
+ ALOGE("Error: timerfd read failed");
+ }
+ } else {
+ ALOGE("Error: timerfd_settime failed because: %s", strerror(errno));
+ }
+
+ auto& frame_history = graphics_context->frame_history;
+ frame_history.CheckForFinishedFrames();
+ if (fetch_schedule_result == 0) {
+ uint32_t next_frame_vsync =
+ current_frame_vsync +
+ frame_history.PredictNextFrameVsyncInterval(vsync_period_ns);
+ int64_t next_frame_scheduled_finish =
+ (wake_time_ns - start_delay_ns) + vsync_period_ns;
+ frame_history.OnFrameStart(next_frame_vsync, next_frame_scheduled_finish);
+ if (out_next_frame_schedule) {
+ out_next_frame_schedule->vsync_count = next_frame_vsync;
+ out_next_frame_schedule->scheduled_frame_finish_ns =
+ next_frame_scheduled_finish;
+ }
+ } else {
+ frame_history.OnFrameStart(UINT32_MAX, -1);
+ }
+
+ return (fetch_schedule_result == 0 && sleep_result) ? 0 : -1;
+}
+
+extern "C" void dvrGraphicsPostEarly(DvrGraphicsContext* graphics_context) {
+ ATRACE_NAME("dvrGraphicsPostEarly");
+ ALOGI_IF(TRACE, "dvrGraphicsPostEarly");
+
+ LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+
+ // Note that this function can be called before or after
+ // dvrBeginRenderFrame.
+ if (!graphics_context->buffer_already_posted) {
+ graphics_context->buffer_already_posted = true;
+
+ if (!graphics_context->current_buffer) {
+ graphics_context->current_buffer =
+ graphics_context->buffer_queue->Dequeue();
+ }
+
+ auto buffer = graphics_context->current_buffer->buffer().get();
+ ATRACE_ASYNC_BEGIN("BufferPost", buffer->id());
+ int result = buffer->Post<uint64_t>(LocalHandle(), 0);
+ if (result < 0)
+ ALOGE("Buffer post failed: %d (%s)", result, strerror(-result));
+ }
+}
+
+int dvrPresent(DvrGraphicsContext* graphics_context) {
+ LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_GLES);
+
+ std::array<char, 128> buf;
+ snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
+ graphics_context->frame_history.GetCurrentFrameVsync());
+ ATRACE_NAME(buf.data());
+
+ if (!graphics_context->current_buffer) {
+ ALOGE("Error: dvrPresent called without dvrBeginRenderFrame");
+ return -EPERM;
+ }
+
+ LocalHandle fence_fd =
+ android::dvr::CreateGLSyncAndFlush(graphics_context->gl.egl_display);
+
+ ALOGI_IF(TRACE, "PostBuffer: buffer_id=%d, fence_fd=%d",
+ graphics_context->current_buffer->buffer()->id(), fence_fd.Get());
+ ALOGW_IF(!graphics_context->display_surface->visible(),
+ "PostBuffer: Posting buffer on invisible surface!!!");
+
+ auto buffer = graphics_context->current_buffer->buffer().get();
+ ATRACE_ASYNC_END("BufferDraw", buffer->id());
+ if (!graphics_context->buffer_already_posted) {
+ ATRACE_ASYNC_BEGIN("BufferPost", buffer->id());
+ int result = buffer->Post<uint64_t>(fence_fd, 0);
+ if (result < 0)
+ ALOGE("Buffer post failed: %d (%s)", result, strerror(-result));
+ }
+
+ graphics_context->frame_history.OnFrameSubmit(std::move(fence_fd));
+ graphics_context->buffer_already_posted = false;
+ graphics_context->current_buffer = nullptr;
+ return 0;
+}
+
+int dvrPresentVk(DvrGraphicsContext* graphics_context,
+ VkSemaphore submit_semaphore, uint32_t swapchain_image_index) {
+ LOG_ASSERT(graphics_context->graphics_api == DVR_GRAPHICS_API_VULKAN);
+
+ std::array<char, 128> buf;
+ snprintf(buf.data(), buf.size(), "dvrPresent|vsync=%d|",
+ graphics_context->frame_history.GetCurrentFrameVsync());
+ ATRACE_NAME(buf.data());
+
+ if (!graphics_context->current_buffer) {
+ ALOGE("Error: dvrPresentVk called without dvrBeginRenderFrameVk");
+ return -EPERM;
+ }
+
+ // Present the specified image. Internally, this gets a fence from the
+ // Vulkan driver and passes it to DvrGraphicsContext::Post(),
+ // which in turn passes it to buffer->Post() and adds it to frame_history.
+ VkPresentInfoKHR present_info = {};
+ present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = &graphics_context->vk.swapchain;
+ present_info.pImageIndices = &swapchain_image_index;
+ present_info.waitSemaphoreCount =
+ (submit_semaphore != VK_NULL_HANDLE) ? 1 : 0;
+ present_info.pWaitSemaphores = &submit_semaphore;
+ VkResult result =
+ vkQueuePresentKHR(graphics_context->vk.present_queue, &present_info);
+ if (result != VK_SUCCESS) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+extern "C" int dvrGetFrameScheduleResults(DvrGraphicsContext* context,
+ DvrFrameScheduleResult* results,
+ int in_result_count) {
+ if (!context || !results)
+ return -EINVAL;
+
+ return context->frame_history.GetPreviousFrameResults(results,
+ in_result_count);
+}
+
+extern "C" void dvrGraphicsSurfaceSetVisible(
+ DvrGraphicsContext* graphics_context, int visible) {
+ graphics_context->display_surface->SetVisible(visible);
+}
+
+extern "C" int dvrGraphicsSurfaceGetVisible(
+ DvrGraphicsContext* graphics_context) {
+ return graphics_context->display_surface->visible() ? 1 : 0;
+}
+
+extern "C" void dvrGraphicsSurfaceSetZOrder(
+ DvrGraphicsContext* graphics_context, int z_order) {
+ graphics_context->display_surface->SetZOrder(z_order);
+}
+
+extern "C" int dvrGraphicsSurfaceGetZOrder(
+ DvrGraphicsContext* graphics_context) {
+ return graphics_context->display_surface->z_order();
+}
+
+extern "C" DvrVideoMeshSurface* dvrGraphicsVideoMeshSurfaceCreate(
+ DvrGraphicsContext* graphics_context) {
+ auto display_surface = graphics_context->display_surface;
+ // A DisplaySurface must be created prior to the creation of a
+ // VideoMeshSurface.
+ LOG_ASSERT(display_surface != nullptr);
+
+ LocalChannelHandle surface_handle = display_surface->CreateVideoMeshSurface();
+ if (!surface_handle.valid()) {
+ return nullptr;
+ }
+
+ std::unique_ptr<DvrVideoMeshSurface> surface(new DvrVideoMeshSurface);
+ surface->client =
+ android::dvr::VideoMeshSurfaceClient::Import(std::move(surface_handle));
+
+ // TODO(jwcai) The next line is not needed...
+ auto producer_queue = surface->client->GetProducerQueue();
+ return surface.release();
+}
+
+extern "C" void dvrGraphicsVideoMeshSurfaceDestroy(
+ DvrVideoMeshSurface* surface) {
+ delete surface;
+}
+
+extern "C" void dvrGraphicsVideoMeshSurfacePresent(
+ DvrGraphicsContext* graphics_context, DvrVideoMeshSurface* surface,
+ const int eye, const float* transform) {
+ volatile android::dvr::VideoMeshSurfaceMetadata* metadata =
+ surface->client->GetMetadataBufferPtr();
+
+ const uint32_t graphics_buffer_index =
+ graphics_context->current_buffer->surface_buffer_index();
+
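+  // Assuming the column-major layout used elsewhere in this API, elements
+  // i, i+4, i+8 and i+12 form row i, so each metadata vector receives one
+  // row of the matrix (i.e. the transpose is stored).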
+ for (int i = 0; i < 4; ++i) {
+ metadata->transform[graphics_buffer_index][eye].val[i] = {
+ transform[i + 0], transform[i + 4], transform[i + 8], transform[i + 12],
+ };
+ }
+}
diff --git a/libs/vr/libdisplay/include/CPPLINT.cfg b/libs/vr/libdisplay/include/CPPLINT.cfg
new file mode 100644
index 0000000..2f8a3c0
--- /dev/null
+++ b/libs/vr/libdisplay/include/CPPLINT.cfg
@@ -0,0 +1 @@
+filter=-build/header_guard
diff --git a/libs/vr/libdisplay/include/dvr/graphics.h b/libs/vr/libdisplay/include/dvr/graphics.h
new file mode 100644
index 0000000..50d2754
--- /dev/null
+++ b/libs/vr/libdisplay/include/dvr/graphics.h
@@ -0,0 +1,475 @@
+#ifndef DVR_GRAPHICS_H_
+#define DVR_GRAPHICS_H_
+
+#include <EGL/egl.h>
+#include <sys/cdefs.h>
+
+#ifdef __ARM_NEON
+#include <arm_neon.h>
+#else
+#ifndef __FLOAT32X4T_86
+#define __FLOAT32X4T_86
+typedef float float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t;
+#endif
+#endif
+
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR 1
+#endif
+#include <vulkan/vulkan.h>
+
+__BEGIN_DECLS
+
+// Create a stereo surface that will be lens-warped by the system.
+EGLNativeWindowType dvrCreateWarpedDisplaySurface(int* display_width,
+ int* display_height);
+EGLNativeWindowType dvrCreateDisplaySurface(void);
+
+// Display surface parameters used to specify display surface options.
+enum {
+ DVR_SURFACE_PARAMETER_NONE = 0,
+ // WIDTH
+ DVR_SURFACE_PARAMETER_WIDTH_IN,
+ // HEIGHT
+ DVR_SURFACE_PARAMETER_HEIGHT_IN,
+ // DISABLE_DISTORTION
+ DVR_SURFACE_PARAMETER_DISABLE_DISTORTION_IN,
+ // DISABLE_STABILIZATION
+ DVR_SURFACE_PARAMETER_DISABLE_STABILIZATION_IN,
+ // Disable chromatic aberration correction
+ DVR_SURFACE_PARAMETER_DISABLE_CAC_IN,
+ // ENABLE_LATE_LATCH: Enable late latching of pose data for application
+ // GPU shaders.
+ DVR_SURFACE_PARAMETER_ENABLE_LATE_LATCH_IN,
+ // VISIBLE
+ DVR_SURFACE_PARAMETER_VISIBLE_IN,
+ // Z_ORDER
+ DVR_SURFACE_PARAMETER_Z_ORDER_IN,
+ // EXCLUDE_FROM_BLUR
+ DVR_SURFACE_PARAMETER_EXCLUDE_FROM_BLUR_IN,
+ // BLUR_BEHIND
+ DVR_SURFACE_PARAMETER_BLUR_BEHIND_IN,
+ // DISPLAY_WIDTH
+ DVR_SURFACE_PARAMETER_DISPLAY_WIDTH_OUT,
+ // DISPLAY_HEIGHT
+ DVR_SURFACE_PARAMETER_DISPLAY_HEIGHT_OUT,
+ // SURFACE_WIDTH: Returns width of allocated surface buffer.
+ DVR_SURFACE_PARAMETER_SURFACE_WIDTH_OUT,
+ // SURFACE_HEIGHT: Returns height of allocated surface buffer.
+ DVR_SURFACE_PARAMETER_SURFACE_HEIGHT_OUT,
+ // INTER_LENS_METERS: Returns float value in meters, the distance between
+ // lenses.
+ DVR_SURFACE_PARAMETER_INTER_LENS_METERS_OUT,
+ // LEFT_FOV_LRBT: Return storage must have room for array of 4 floats (in
+ // radians). The layout is left, right, bottom, top as indicated by LRBT.
+ DVR_SURFACE_PARAMETER_LEFT_FOV_LRBT_OUT,
+ // RIGHT_FOV_LRBT: Return storage must have room for array of 4 floats (in
+ // radians). The layout is left, right, bottom, top as indicated by LRBT.
+ DVR_SURFACE_PARAMETER_RIGHT_FOV_LRBT_OUT,
+ // VSYNC_PERIOD: Returns the period of the display refresh (in
+ // nanoseconds per refresh), as a 64-bit unsigned integer.
+ DVR_SURFACE_PARAMETER_VSYNC_PERIOD_OUT,
+ // SURFACE_TEXTURE_TARGET_TYPE: Returns the type of texture used as the render
+ // target.
+ DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_TYPE_OUT,
+ // SURFACE_TEXTURE_TARGET_ID: Returns the texture ID used as the render
+ // target.
+ DVR_SURFACE_PARAMETER_SURFACE_TEXTURE_TARGET_ID_OUT,
+ // Whether the surface needs to be flipped vertically before display. Default
+ // is 0.
+ DVR_SURFACE_PARAMETER_VERTICAL_FLIP_IN,
+ // A bool indicating whether or not to create a GL context for the surface.
+ // 0: don't create a context
+ // Non-zero: create a context.
+ // Default is 1.
+ // If this value is 0, there must be a GLES 3.2 or greater context bound on
+ // the current thread at the time dvrGraphicsContextCreate is called.
+ DVR_SURFACE_PARAMETER_CREATE_GL_CONTEXT_IN,
+ // Specify one of DVR_SURFACE_GEOMETRY_*.
+ DVR_SURFACE_PARAMETER_GEOMETRY_IN,
+ // FORMAT: One of DVR_SURFACE_FORMAT_RGBA_8888 or DVR_SURFACE_FORMAT_RGB_565.
+ // Default is DVR_SURFACE_FORMAT_RGBA_8888.
+ DVR_SURFACE_PARAMETER_FORMAT_IN,
+ // GRAPHICS_API: One of DVR_SURFACE_GRAPHICS_API_GLES or
+ // DVR_SURFACE_GRAPHICS_API_VULKAN. Default is GLES.
+ DVR_SURFACE_PARAMETER_GRAPHICS_API_IN,
+ // VK_INSTANCE: In Vulkan mode, the application creates a VkInstance and
+ // passes it in.
+ DVR_SURFACE_PARAMETER_VK_INSTANCE_IN,
+ // VK_PHYSICAL_DEVICE: In Vulkan mode, the application passes in the
+ // PhysicalDevice handle corresponding to the logical device passed to
+ // VK_DEVICE.
+ DVR_SURFACE_PARAMETER_VK_PHYSICAL_DEVICE_IN,
+ // VK_DEVICE: In Vulkan mode, the application creates a VkDevice and
+ // passes it in.
+ DVR_SURFACE_PARAMETER_VK_DEVICE_IN,
+ // VK_PRESENT_QUEUE: In Vulkan mode, the application selects a
+ // presentation-compatible VkQueue and passes it in.
+ DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_IN,
+ // VK_PRESENT_QUEUE_FAMILY: In Vulkan mode, the application passes in the
+ // index of the queue family containing the VkQueue passed to
+ // VK_PRESENT_QUEUE.
+ DVR_SURFACE_PARAMETER_VK_PRESENT_QUEUE_FAMILY_IN,
+ // VK_SWAPCHAIN_IMAGE_COUNT: In Vulkan mode, the number of swapchain images
+ // will be returned here.
+ DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_COUNT_OUT,
+ // VK_SWAPCHAIN_IMAGE_FORMAT: In Vulkan mode, the VkFormat of the swapchain
+ // images will be returned here.
+ DVR_SURFACE_PARAMETER_VK_SWAPCHAIN_IMAGE_FORMAT_OUT,
+};
+
+enum {
+ // Default surface type. One wide buffer with the left eye view in the left
+ // half and the right eye view in the right half.
+ DVR_SURFACE_GEOMETRY_SINGLE,
+ // Separate buffers, one per eye. The width parameters still refer to the
+ // total width (2 * eye view width).
+ DVR_SURFACE_GEOMETRY_SEPARATE_2,
+};
+
+// Surface format. Gvr only supports RGBA_8888 and RGB_565 for now, so those are
+// the only formats we provide here.
+enum {
+ DVR_SURFACE_FORMAT_RGBA_8888,
+ DVR_SURFACE_FORMAT_RGB_565,
+};
+
+enum {
+ // Graphics contexts are created for OpenGL ES client applications by default.
+ DVR_GRAPHICS_API_GLES,
+ // Create the graphics context for Vulkan client applications.
+ DVR_GRAPHICS_API_VULKAN,
+};
+
+#define DVR_SURFACE_PARAMETER_IN(name, value) \
+ { DVR_SURFACE_PARAMETER_##name##_IN, (value), NULL }
+#define DVR_SURFACE_PARAMETER_OUT(name, value) \
+ { DVR_SURFACE_PARAMETER_##name##_OUT, 0, (value) }
+#define DVR_SURFACE_PARAMETER_LIST_END \
+ { DVR_SURFACE_PARAMETER_NONE, 0, NULL }
+
+struct DvrSurfaceParameter {
+ int32_t key;
+ int64_t value;
+ void* value_out;
+};
+
+// This is a convenience struct to hold the relevant information of the HMD
+// lenses.
+struct DvrLensInfo {
+ float inter_lens_meters;
+ float left_fov[4];
+ float right_fov[4];
+};
+
+// Creates a display surface with the given parameters. The list of parameters
+// is terminated with an entry where key == DVR_SURFACE_PARAMETER_NONE.
+// For example, the parameters array could be built as follows:
+// int display_width = 0, display_height = 0;
+// int surface_width = 0, surface_height = 0;
+// float inter_lens_meters = 0.0f;
+// float left_fov[4] = {0.0f};
+// float right_fov[4] = {0.0f};
+// int disable_warp = 0;
+// DvrSurfaceParameter surface_params[] = {
+// DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
+// DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
+// DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
+// DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
+// DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
+// DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
+// DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
+// DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
+// DVR_SURFACE_PARAMETER_LIST_END,
+// };
+EGLNativeWindowType dvrCreateDisplaySurfaceExtended(
+ struct DvrSurfaceParameter* parameters);
+
+int dvrGetNativeDisplayDimensions(int* native_width, int* native_height);
+
+int dvrGetDisplaySurfaceInfo(EGLNativeWindowType win, int* width, int* height,
+ int* format);
+
+// NOTE: Only call the functions below on windows created with the API above.
+
+// Sets the display surface visible based on the boolean evaluation of
+// |visible|.
+void dvrDisplaySurfaceSetVisible(EGLNativeWindowType window, int visible);
+
+// Sets the application z-order of the display surface. Higher values display on
+// top of lower values.
+void dvrDisplaySurfaceSetZOrder(EGLNativeWindowType window, int z_order);
+
+// Post the next buffer early. This allows the application to race with either
+// the async EDS process or the scanline for applications that are not using
+// system distortion. When this is called, the next buffer in the queue is
+// posted for display. It is up to the application to kick its GPU rendering
+// work in time. If the rendering is incomplete there will be significant,
+// undesirable tearing artifacts.
+// It is not recommended to use this feature with system distortion.
+void dvrDisplayPostEarly(EGLNativeWindowType window);
+
+// Opaque struct that represents a graphics context, the texture swap chain,
+// and surfaces.
+typedef struct DvrGraphicsContext DvrGraphicsContext;
+
+// Create the graphics context.
+int dvrGraphicsContextCreate(struct DvrSurfaceParameter* parameters,
+ DvrGraphicsContext** return_graphics_context);
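+
+// For Vulkan clients the parameter list carries the application's Vulkan
+// handles. A minimal sketch (assuming the application has already created
+// the instance, devices, and present queue):
+//   DvrSurfaceParameter vk_params[] = {
+//       DVR_SURFACE_PARAMETER_IN(GRAPHICS_API, DVR_GRAPHICS_API_VULKAN),
+//       DVR_SURFACE_PARAMETER_IN(VK_INSTANCE, (int64_t)instance),
+//       DVR_SURFACE_PARAMETER_IN(VK_PHYSICAL_DEVICE, (int64_t)physical_device),
+//       DVR_SURFACE_PARAMETER_IN(VK_DEVICE, (int64_t)device),
+//       DVR_SURFACE_PARAMETER_IN(VK_PRESENT_QUEUE, (int64_t)present_queue),
+//       DVR_SURFACE_PARAMETER_IN(VK_PRESENT_QUEUE_FAMILY, queue_family_index),
+//       DVR_SURFACE_PARAMETER_LIST_END,
+//   };
+//   DvrGraphicsContext* context = NULL;
+//   dvrGraphicsContextCreate(vk_params, &context);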
+
+// Destroy the graphics context.
+void dvrGraphicsContextDestroy(DvrGraphicsContext* graphics_context);
+
+// For every frame a schedule is decided by the system compositor. A sample
+// schedule for two frames is shown below.
+//
+// | | |
+// |-----------------|------|-----------------|------|
+// | | |
+// V0 A1 V1 A2 V2
+//
+// V0, V1, and V2 are display vsync events. Vsync events are uniquely identified
+// throughout the DVR system by a vsync count maintained by the system
+// compositor.
+//
+// A1 and A2 indicate when the application should finish rendering its frame,
+// including all GPU work. Under normal circumstances the scheduled finish
+// time will be set a few milliseconds before the vsync time, to give the
+// compositor time to perform distortion and EDS on the app's buffer. For apps
+// that don't use system distortion the scheduled frame finish time will be
+// closer to the vsync time. Other factors can also affect the scheduled frame
+// finish time, e.g. whether or not the System UI is being displayed.
+typedef struct DvrFrameSchedule {
+ // vsync_count is used as a frame identifier.
+ uint32_t vsync_count;
+
+ // The time when the app should finish rendering its frame, including all GPU
+ // work.
+ int64_t scheduled_frame_finish_ns;
+} DvrFrameSchedule;
+
+// Sleep until it's time to render the next frame. This should be the first
+// function called as part of an app's render loop, which normally looks like
+// this:
+//
+// while (1) {
+// DvrFrameSchedule schedule;
+// dvrGraphicsWaitNextFrame(..., &schedule); // Sleep until it's time to
+// // render the next frame
+// pose = dvrPoseGet(schedule.vsync_count);
+// dvrBeginRenderFrame(...);
+// <render a frame using the pose>
+// dvrPresent(...); // Post the buffer
+// }
+//
+// |start_delay_ns| adjusts how long this function blocks the app from starting
+// its next frame. If |start_delay_ns| is 0, the function waits until the
+// scheduled frame finish time for the current frame, which gives the app one
+// full vsync period to render the next frame. If the app needs less than a full
+// vsync period to render the frame, pass in a non-zero |start_delay_ns| to
+// delay the start of frame rendering further. For example, if the vsync period
+// is 11.1ms and the app takes 6ms to render a frame, consider setting this to
+// 5ms (note that the value is in nanoseconds, so 5,000,000ns) so that the app
+// finishes the frame closer to the scheduled frame finish time. Delaying the
+// start of rendering allows the app to use a more up-to-date pose for
+// rendering.
+// |start_delay_ns| must be a positive value or 0. If you're unsure what to set
+// for |start_delay_ns|, use 0.
+//
+// |out_next_frame_schedule| is an output parameter that will contain the
+// schedule for the next frame. It can be null. This function returns a negative
+// error code on failure.
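+//
+// For example, for the 11.1ms vsync / 6ms render case described above (a
+// sketch; |context| is an already-created DvrGraphicsContext):
+//   int64_t start_delay_ns = 5 * 1000 * 1000;  // 5ms
+//   DvrFrameSchedule schedule;
+//   dvrGraphicsWaitNextFrame(context, start_delay_ns, &schedule);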
+int dvrGraphicsWaitNextFrame(DvrGraphicsContext* graphics_context,
+ int64_t start_delay_ns,
+ DvrFrameSchedule* out_next_frame_schedule);
+
+// Prepares the graphics context's texture for rendering. This function should
+// be called once for each frame, ideally immediately before the first GL call
+// on the framebuffer which wraps the surface texture.
+//
+// For GL contexts, GL states are modified as follows by this function:
+// glBindTexture(GL_TEXTURE_2D, 0);
+//
+// @param[in] graphics_context The DvrGraphicsContext.
+// @param[in] render_pose_orientation Head pose orientation that rendering for
+// this frame will be based off of. This must be an unmodified value
+// from DvrPoseAsync, returned by dvrPoseGet.
+// @param[in] render_pose_translation Head pose translation that rendering for
+// this frame will be based off of. This must be an unmodified value
+// from DvrPoseAsync, returned by dvrPoseGet.
+// @return 0 on success or a negative error code on failure.
+// Check GL errors with glGetError for other error conditions.
+int dvrBeginRenderFrameEds(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation);
+int dvrBeginRenderFrameEdsVk(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation,
+ VkSemaphore acquire_semaphore,
+ VkFence acquire_fence,
+ uint32_t* swapchain_image_index,
+ VkImageView* swapchain_image_view);
+// Same as dvrBeginRenderFrameEds, but with no EDS (asynchronous reprojection).
+//
+// For GL contexts, GL states are modified as follows by this function:
+// glBindTexture(GL_TEXTURE_2D, 0);
+//
+// @param[in] graphics_context The DvrGraphicsContext.
+// @return 0 on success or a negative error code on failure.
+// Check GL errors with glGetError for other error conditions.
+int dvrBeginRenderFrame(DvrGraphicsContext* graphics_context);
+int dvrBeginRenderFrameVk(DvrGraphicsContext* graphics_context,
+ VkSemaphore acquire_semaphore, VkFence acquire_fence,
+ uint32_t* swapchain_image_index,
+ VkImageView* swapchain_image_view);
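+
+// A typical Vulkan render loop using the functions above might look as
+// follows (a sketch: |context| and |schedule| are assumed to exist, semaphore
+// creation and command-buffer submission are elided, and rendering is assumed
+// to wait on |acquire_semaphore| and signal |submit_semaphore|):
+//   while (1) {
+//     dvrGraphicsWaitNextFrame(context, 0, &schedule);
+//     uint32_t image_index = 0;
+//     VkImageView image_view = VK_NULL_HANDLE;
+//     dvrBeginRenderFrameVk(context, acquire_semaphore, VK_NULL_HANDLE,
+//                           &image_index, &image_view);
+//     <render to image_view and submit, signaling submit_semaphore>
+//     dvrPresentVk(context, submit_semaphore, image_index);
+//   }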
+
+// Maximum number of views per surface buffer (for multiview, multi-eye, etc).
+#define DVR_GRAPHICS_SURFACE_MAX_VIEWS 4
+
+// Output data format of late latch shader. The application can bind all or part
+// of this data with the buffer ID returned by dvrBeginRenderFrameLateLatch.
+// This struct is compatible with std140 layout for use from shaders.
+struct __attribute__((__packed__)) DvrGraphicsLateLatchData {
+ // Column-major order.
+ float view_proj_matrix[DVR_GRAPHICS_SURFACE_MAX_VIEWS][16];
+ // Column-major order.
+ float view_matrix[DVR_GRAPHICS_SURFACE_MAX_VIEWS][16];
+ // Quaternion for pose orientation from start space.
+ float pose_orientation[4];
+ // Pose translation from start space.
+ float pose_translation[4];
+};
+
+// Begin render frame with late latching of pose data. This kicks off a compute
+// shader that will read the latest head pose and then compute and output
+// matrices that can be used by application shaders.
+//
+// Matrices are computed with the following pseudo code.
+// Pose pose = getLateLatchPose();
+// out.pose_orientation = pose.orientation;
+// out.pose_translation = pose.translation;
+// mat4 head_from_center = ComputeInverseMatrix(pose);
+// for each view:
+// out.viewMatrix[view] =
+// eye_from_head_matrices[view] * head_from_center *
+// pose_offset_matrices[view];
+// out.viewProjMatrix[view] =
+// projection_matrices[view] * out.viewMatrix[view];
+//
+// For GL contexts, GL states are modified as follows by this function:
+// glBindTexture(GL_TEXTURE_2D, 0);
+// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, 0);
+// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, 0);
+// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, 0);
+// glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, 0);
+// glUseProgram(0);
+//
+// @param[in] graphics_context The DvrGraphicsContext.
+// @param[in] flags Specify 0.
+// @param[in] target_vsync_count The target vsync count that this frame will
+// display at. This is used for pose prediction.
+// @param[in] num_views Number of matrices in each of the following matrix array
+// parameters. Typically 2 for left and right eye views. Maximum is
+// DVR_GRAPHICS_SURFACE_MAX_VIEWS.
+// @param[in] projection_matrices Array of pointers to |num_views| matrices with
+// column-major layout. These are the application projection
+// matrices.
+// @param[in] eye_from_head_matrices Array of pointers to |num_views| matrices
+// with column-major layout. See pseudo code for how these are used.
+// @param[in] pose_offset_matrices Array of pointers to |num_views| matrices
+// with column-major layout. See pseudo code for how these are used.
+// @param[out] out_late_latch_buffer_id The GL buffer ID of the output buffer
+//             of type DvrGraphicsLateLatchData.
+// @return 0 on success or a negative error code on failure.
+// Check GL errors with glGetError for other error conditions.
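+//
+// Example (a sketch; binding point 0 is an arbitrary choice, and |context|,
+// |schedule|, and the three matrix arrays are assumed to exist):
+//   uint32_t late_latch_buffer_id = 0;
+//   dvrBeginRenderFrameLateLatch(context, 0, schedule.vsync_count, 2,
+//                                projection_matrices, eye_from_head_matrices,
+//                                pose_offset_matrices, &late_latch_buffer_id);
+//   glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, late_latch_buffer_id);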
+int dvrBeginRenderFrameLateLatch(DvrGraphicsContext* graphics_context,
+ uint32_t flags, uint32_t target_vsync_count,
+ int num_views,
+ const float** projection_matrices,
+ const float** eye_from_head_matrices,
+ const float** pose_offset_matrices,
+ uint32_t* out_late_latch_buffer_id);
+
+// Present a frame for display.
+// This call is normally non-blocking, unless the internal buffer queue is full.
+// @return 0 on success or a negative error code on failure.
+int dvrPresent(DvrGraphicsContext* graphics_context);
+int dvrPresentVk(DvrGraphicsContext* graphics_context,
+ VkSemaphore submit_semaphore, uint32_t swapchain_image_index);
+
+// Post the next buffer early. This allows the application to race with either
+// the async EDS process or the scanline for applications that are not using
+// system distortion. When this is called, the next buffer in the queue is
+// posted for display. It is up to the application to kick its GPU rendering
+// work in time. If the rendering is incomplete there will be significant,
+// undesirable tearing artifacts.
+// It is not recommended to use this feature with system distortion.
+void dvrGraphicsPostEarly(DvrGraphicsContext* graphics_context);
+
+// Used to retrieve frame measurement timings from dvrGetFrameScheduleResults().
+typedef struct DvrFrameScheduleResult {
+ // vsync_count is used as a frame identifier.
+ uint32_t vsync_count;
+
+ // The app's scheduled frame finish time.
+ int64_t scheduled_frame_finish_ns;
+
+ // The difference (in nanoseconds) between the scheduled finish time and the
+ // actual finish time.
+ //
+ // A value of +2ms for frame_finish_offset_ns indicates the app's frame was
+ // late and may have been skipped by the compositor for that vsync. A value of
+ // -1ms indicates the app's frame finished just ahead of schedule, as
+ // desired. A value of -6ms indicates the app's frame finished well ahead of
+ // schedule for that vsync. In that case the app may have unnecessary visual
+ // latency. Consider using the start_delay_ns parameter in
+ // dvrGraphicsWaitNextFrame() to align the app's frame finish time closer to
+ // the scheduled finish time.
+ int64_t frame_finish_offset_ns;
+} DvrFrameScheduleResult;
+
+// Retrieve the latest frame schedule results for the app. To collect all the
+// results this should be called each frame. The results for each frame are
+// returned only once.
+// The number of results written to |results| is returned on success, or a
+// negative error code on failure.
+// |graphics_context| is the context to retrieve frame schedule results for.
+// |results| is an array that will contain the frame schedule results.
+// |result_count| is the size of the |results| array. It's recommended to pass
+// in an array with 2 elements to ensure results for all frames are collected.
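+//
+// For example, polled once per frame (a sketch; the log line is purely
+// illustrative):
+//   DvrFrameScheduleResult results[2];
+//   int count = dvrGetFrameScheduleResults(context, results, 2);
+//   for (int i = 0; i < count; ++i) {
+//     ALOGI("vsync %u finished %lld ns off schedule", results[i].vsync_count,
+//           (long long)results[i].frame_finish_offset_ns);
+//   }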
+int dvrGetFrameScheduleResults(DvrGraphicsContext* graphics_context,
+ DvrFrameScheduleResult* results,
+ int result_count);
+
+// Make the surface visible or hidden based on |visible|.
+// 0: hidden, Non-zero: visible.
+void dvrGraphicsSurfaceSetVisible(DvrGraphicsContext* graphics_context,
+ int visible);
+
+// Returns surface visibility last requested by the client.
+int dvrGraphicsSurfaceGetVisible(DvrGraphicsContext* graphics_context);
+
+// Returns surface z order last requested by the client.
+int dvrGraphicsSurfaceGetZOrder(DvrGraphicsContext* graphics_context);
+
+// Sets the compositor z-order of the surface. Higher values display on
+// top of lower values.
+void dvrGraphicsSurfaceSetZOrder(DvrGraphicsContext* graphics_context,
+ int z_order);
+
+typedef struct DvrVideoMeshSurface DvrVideoMeshSurface;
+
+DvrVideoMeshSurface* dvrGraphicsVideoMeshSurfaceCreate(
+ DvrGraphicsContext* graphics_context);
+void dvrGraphicsVideoMeshSurfaceDestroy(DvrVideoMeshSurface* surface);
+
+// Present a VideoMeshSurface with the current video mesh transformation
+// matrix.
+void dvrGraphicsVideoMeshSurfacePresent(DvrGraphicsContext* graphics_context,
+ DvrVideoMeshSurface* surface,
+ const int eye,
+ const float* transform);
+
+__END_DECLS
+
+#endif // DVR_GRAPHICS_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/display_client.h b/libs/vr/libdisplay/include/private/dvr/display_client.h
new file mode 100644
index 0000000..034b7b4
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/display_client.h
@@ -0,0 +1,126 @@
+#ifndef ANDROID_DVR_DISPLAY_CLIENT_H_
+#define ANDROID_DVR_DISPLAY_CLIENT_H_
+
+#include <hardware/hwcomposer.h>
+#include <pdx/client.h>
+#include <pdx/file_handle.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+struct LateLatchOutput;
+
+// Abstract base class for all surface types maintained in DVR's display
+// service.
+// TODO(jwcai) Explain more, surface is a channel...
+class SurfaceClient : public pdx::Client {
+ public:
+ using LocalChannelHandle = pdx::LocalChannelHandle;
+ SurfaceType type() const { return type_; }
+
+ // Get the shared memory metadata buffer fd for this display surface. If it is
+ // not yet allocated, this will allocate it.
+ int GetMetadataBufferFd(pdx::LocalHandle* out_fd);
+
+ // Allocate the single metadata buffer for providing metadata associated with
+ // posted buffers for this surface. This can be used to provide rendered poses
+ // for EDS, for example. The buffer format is defined by the struct
+ // DisplaySurfaceMetadata.
+  // The first call to this method will allocate the buffer via IPC with the
+  // display surface.
+ std::shared_ptr<BufferProducer> GetMetadataBuffer();
+
+ protected:
+ SurfaceClient(LocalChannelHandle channel_handle, SurfaceType type);
+ SurfaceClient(const std::string& endpoint_path, SurfaceType type);
+
+ private:
+ SurfaceType type_;
+ std::shared_ptr<BufferProducer> metadata_buffer_;
+};
+
+// DisplaySurfaceClient represents the client interface to a displayd display
+// surface.
+class DisplaySurfaceClient
+ : public pdx::ClientBase<DisplaySurfaceClient, SurfaceClient> {
+ public:
+ using LocalHandle = pdx::LocalHandle;
+
+ int width() const { return width_; }
+ int height() const { return height_; }
+ int format() const { return format_; }
+ int usage() const { return usage_; }
+ int flags() const { return flags_; }
+ int z_order() const { return z_order_; }
+ bool visible() const { return visible_; }
+
+ void SetVisible(bool visible);
+ void SetZOrder(int z_order);
+ void SetExcludeFromBlur(bool exclude_from_blur);
+ void SetBlurBehind(bool blur_behind);
+ void SetAttributes(const DisplaySurfaceAttributes& attributes);
+
+ // |out_buffer_index| will receive a unique index for this buffer within the
+ // surface. The first buffer gets 0, second gets 1, and so on. This index
+ // can be used to deliver metadata for buffers that are queued for display.
+ std::shared_ptr<BufferProducer> AllocateBuffer(uint32_t* out_buffer_index);
+ std::shared_ptr<BufferProducer> AllocateBuffer() {
+ return AllocateBuffer(nullptr);
+ }
+
+ // Get the shared memory metadata buffer for this display surface. If it is
+ // not yet allocated, this will allocate it.
+ volatile DisplaySurfaceMetadata* GetMetadataBufferPtr();
+
+  // Create a VideoMeshSurface that is attached to the display surface.
+ LocalChannelHandle CreateVideoMeshSurface();
+
+ private:
+ friend BASE;
+
+ DisplaySurfaceClient(int width, int height, int format, int usage, int flags);
+
+ int width_;
+ int height_;
+ int format_;
+ int usage_;
+ int flags_;
+ int z_order_;
+ bool visible_;
+ bool exclude_from_blur_;
+ bool blur_behind_;
+ DisplaySurfaceMetadata* mapped_metadata_buffer_;
+
+ DisplaySurfaceClient(const DisplaySurfaceClient&) = delete;
+ void operator=(const DisplaySurfaceClient&) = delete;
+};
+
+class DisplayClient : public pdx::ClientBase<DisplayClient> {
+ public:
+ int GetDisplayMetrics(SystemDisplayMetrics* metrics);
+ pdx::Status<void> SetViewerParams(const ViewerParams& viewer_params);
+
+  // Pull the latest EDS pose data from the display service renderer.
+ int GetLastFrameEdsTransform(LateLatchOutput* ll_out);
+
+ int EnterVrMode();
+ int ExitVrMode();
+
+ std::unique_ptr<DisplaySurfaceClient> CreateDisplaySurface(
+ int width, int height, int format, int usage, int flags);
+
+ private:
+ friend BASE;
+
+ explicit DisplayClient(int* error = nullptr);
+
+ DisplayClient(const DisplayClient&) = delete;
+ void operator=(const DisplayClient&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_DISPLAY_CLIENT_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/display_manager_client.h b/libs/vr/libdisplay/include/private/dvr/display_manager_client.h
new file mode 100644
index 0000000..f28c1e4
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/display_manager_client.h
@@ -0,0 +1,73 @@
+#ifndef DVR_DISPLAY_MANAGER_CLIENT_H_
+#define DVR_DISPLAY_MANAGER_CLIENT_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct DvrDisplayManagerClient DvrDisplayManagerClient;
+typedef struct DvrDisplayManagerClientSurfaceList
+ DvrDisplayManagerClientSurfaceList;
+typedef struct DvrDisplayManagerClientSurfaceBuffers
+ DvrDisplayManagerClientSurfaceBuffers;
+
+DvrDisplayManagerClient* dvrDisplayManagerClientCreate(void);
+
+void dvrDisplayManagerClientDestroy(DvrDisplayManagerClient* client);
+
+// If successful, populates |surface_list| with a list of application
+// surfaces the display is currently using.
+//
+// @return 0 on success. Otherwise it returns a negative error value.
+int dvrDisplayManagerClientGetSurfaceList(
+ DvrDisplayManagerClient* client,
+ DvrDisplayManagerClientSurfaceList** surface_list);
+
+void dvrDisplayManagerClientSurfaceListDestroy(
+ DvrDisplayManagerClientSurfaceList* surface_list);
+
+// @return Returns the number of surfaces in the list.
+size_t dvrDisplayManagerClientSurfaceListGetSize(
+ DvrDisplayManagerClientSurfaceList* surface_list);
+
+// @return Returns a unique identifier for a client surface. The identifier can
+// be used to query for other surface properties.
+int dvrDisplayManagerClientSurfaceListGetSurfaceId(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index);
+
+// @return Returns the stacking order of the client surface at |index|.
+int dvrDisplayManagerClientSurfaceListGetClientZOrder(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index);
+
+// @return Returns true if the client surface is visible, false otherwise.
+bool dvrDisplayManagerClientSurfaceListGetClientIsVisible(
+ DvrDisplayManagerClientSurfaceList* surface_list, size_t index);
+
+// Populates |surface_buffers| with the list of buffers for |surface_id|.
+// |surface_id| should be a valid ID from the list of surfaces.
+//
+// @return Returns 0 on success. Otherwise it returns a negative error value.
+int dvrDisplayManagerClientGetSurfaceBuffers(
+ DvrDisplayManagerClient* client, int surface_id,
+ DvrDisplayManagerClientSurfaceBuffers** surface_buffers);
+
+void dvrDisplayManagerClientSurfaceBuffersDestroy(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers);
+
+// @return Returns the number of buffers.
+size_t dvrDisplayManagerClientSurfaceBuffersGetSize(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers);
+
+// @return Returns the file descriptor for the buffer consumer at |index|.
+int dvrDisplayManagerClientSurfaceBuffersGetFd(
+ DvrDisplayManagerClientSurfaceBuffers* surface_buffers, size_t index);
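+
+// Example usage (a sketch; error handling elided):
+//   DvrDisplayManagerClient* client = dvrDisplayManagerClientCreate();
+//   DvrDisplayManagerClientSurfaceList* surfaces = NULL;
+//   if (dvrDisplayManagerClientGetSurfaceList(client, &surfaces) == 0) {
+//     size_t n = dvrDisplayManagerClientSurfaceListGetSize(surfaces);
+//     for (size_t i = 0; i < n; ++i) {
+//       int surface_id =
+//           dvrDisplayManagerClientSurfaceListGetSurfaceId(surfaces, i);
+//       bool visible =
+//           dvrDisplayManagerClientSurfaceListGetClientIsVisible(surfaces, i);
+//       // Query buffers with dvrDisplayManagerClientGetSurfaceBuffers(...).
+//     }
+//     dvrDisplayManagerClientSurfaceListDestroy(surfaces);
+//   }
+//   dvrDisplayManagerClientDestroy(client);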
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // DVR_DISPLAY_MANAGER_CLIENT_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/display_manager_client_impl.h b/libs/vr/libdisplay/include/private/dvr/display_manager_client_impl.h
new file mode 100644
index 0000000..645ccce
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/display_manager_client_impl.h
@@ -0,0 +1,35 @@
+#ifndef ANDROID_DVR_DISPLAY_MANAGER_CLIENT_IMPL_H_
+#define ANDROID_DVR_DISPLAY_MANAGER_CLIENT_IMPL_H_
+
+#include <vector>
+
+#include <pdx/client.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+class BufferConsumer;
+
+class DisplayManagerClient : public pdx::ClientBase<DisplayManagerClient> {
+ public:
+ ~DisplayManagerClient() override;
+
+ int GetSurfaceList(std::vector<DisplaySurfaceInfo>* surface_list);
+
+ int GetSurfaceBuffers(
+ int surface_id, std::vector<std::unique_ptr<BufferConsumer>>* consumers);
+
+ private:
+ friend BASE;
+
+ DisplayManagerClient();
+
+ DisplayManagerClient(const DisplayManagerClient&) = delete;
+ void operator=(const DisplayManagerClient&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_DISPLAY_MANAGER_CLIENT_IMPL_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/display_rpc.h b/libs/vr/libdisplay/include/private/dvr/display_rpc.h
new file mode 100644
index 0000000..6150b35
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/display_rpc.h
@@ -0,0 +1,342 @@
+#ifndef ANDROID_DVR_DISPLAY_RPC_H_
+#define ANDROID_DVR_DISPLAY_RPC_H_
+
+#include <sys/types.h>
+
+#include <array>
+#include <map>
+
+#include <pdx/rpc/remote_method.h>
+#include <pdx/rpc/serializable.h>
+#include <pdx/rpc/variant.h>
+#include <private/dvr/display_types.h>
+
+namespace android {
+namespace dvr {
+
+struct SystemDisplayMetrics {
+ uint32_t display_native_width;
+ uint32_t display_native_height;
+ uint32_t display_x_dpi;
+ uint32_t display_y_dpi;
+ uint32_t distorted_width;
+ uint32_t distorted_height;
+ uint32_t vsync_period_ns;
+ uint32_t hmd_ipd_mm;
+ float inter_lens_distance_m;
+ std::array<float, 4> left_fov_lrbt;
+ std::array<float, 4> right_fov_lrbt;
+
+ private:
+ PDX_SERIALIZABLE_MEMBERS(SystemDisplayMetrics, display_native_width,
+ display_native_height, display_x_dpi, display_y_dpi,
+ distorted_width, distorted_height, vsync_period_ns,
+ hmd_ipd_mm, inter_lens_distance_m, left_fov_lrbt,
+ right_fov_lrbt);
+};
+
+using SurfaceType = uint32_t;
+struct SurfaceTypeEnum {
+ enum : SurfaceType {
+ Normal = DVR_SURFACE_TYPE_NORMAL,
+ VideoMesh = DVR_SURFACE_TYPE_VIDEO_MESH,
+ Overlay = DVR_SURFACE_TYPE_OVERLAY,
+ };
+};
+
+using DisplaySurfaceFlags = uint32_t;
+enum class DisplaySurfaceFlagsEnum : DisplaySurfaceFlags {
+ DisableSystemEds = DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_EDS,
+ DisableSystemDistortion = DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION,
+ VerticalFlip = DVR_DISPLAY_SURFACE_FLAGS_VERTICAL_FLIP,
+ SeparateGeometry = DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2,
+ DisableSystemCac = DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC,
+};
+
+using DisplaySurfaceInfoFlags = uint32_t;
+enum class DisplaySurfaceInfoFlagsEnum : DisplaySurfaceInfoFlags {
+ BuffersChanged = DVR_DISPLAY_SURFACE_ITEM_FLAGS_BUFFERS_CHANGED,
+};
+
+using DisplaySurfaceAttributeValue =
+ pdx::rpc::Variant<int32_t, int64_t, bool, float, std::array<float, 2>,
+ std::array<float, 3>, std::array<float, 4>,
+ std::array<float, 16>>;
+using DisplaySurfaceAttribute = uint32_t;
+struct DisplaySurfaceAttributeEnum {
+ enum : DisplaySurfaceAttribute {
+ ZOrder = DVR_DISPLAY_SURFACE_ATTRIBUTE_Z_ORDER,
+ Visible = DVR_DISPLAY_SURFACE_ATTRIBUTE_VISIBLE,
+ // Manager only.
+ Blur = DVR_DISPLAY_SURFACE_ATTRIBUTE_BLUR,
+ // Client only.
+ ExcludeFromBlur = DVR_DISPLAY_SURFACE_ATTRIBUTE_EXCLUDE_FROM_BLUR,
+ BlurBehind = DVR_DISPLAY_SURFACE_ATTRIBUTE_BLUR_BEHIND,
+ };
+
+ static std::string ToString(DisplaySurfaceAttribute attribute) {
+ switch (attribute) {
+ case ZOrder:
+ return "z-order";
+ case Visible:
+ return "visible";
+ case Blur:
+ return "blur";
+ case ExcludeFromBlur:
+ return "exclude-from-blur";
+ case BlurBehind:
+ return "blur-behind";
+ default:
+ return "unknown";
+ }
+ }
+};
+
+using DisplaySurfaceAttributes =
+ std::map<DisplaySurfaceAttribute, DisplaySurfaceAttributeValue>;
+
+struct DisplaySurfaceInfo {
+ int surface_id;
+ int process_id;
+ SurfaceType type;
+ DisplaySurfaceFlags flags;
+ DisplaySurfaceInfoFlags info_flags;
+ DisplaySurfaceAttributes client_attributes;
+ DisplaySurfaceAttributes manager_attributes;
+
+ // Convenience accessors.
+ bool IsClientVisible() const {
+ const auto* variant =
+ FindClientAttribute(DisplaySurfaceAttributeEnum::Visible);
+ bool bool_value;
+ if (variant && pdx::rpc::IfAnyOf<int32_t, int64_t, bool, float>::Get(
+ variant, &bool_value))
+ return bool_value;
+ else
+ return false;
+ }
+
+ int ClientZOrder() const {
+ const auto* variant =
+ FindClientAttribute(DisplaySurfaceAttributeEnum::ZOrder);
+ int int_value;
+ if (variant &&
+ pdx::rpc::IfAnyOf<int32_t, int64_t, float>::Get(variant, &int_value))
+ return int_value;
+ else
+ return 0;
+ }
+
+ private:
+ const DisplaySurfaceAttributeValue* FindClientAttribute(
+ DisplaySurfaceAttribute key) const {
+ auto search = client_attributes.find(key);
+ return (search != client_attributes.end()) ? &search->second : nullptr;
+ }
+
+ PDX_SERIALIZABLE_MEMBERS(DisplaySurfaceInfo, surface_id, process_id, type,
+ flags, info_flags, client_attributes,
+ manager_attributes);
+};
+
+struct VideoMeshSurfaceBufferMetadata {
+ int64_t timestamp_ns;
+};
+
+struct AlignmentMarker {
+ public:
+ float horizontal;
+ float vertical;
+
+ PDX_SERIALIZABLE_MEMBERS(AlignmentMarker, horizontal, vertical);
+};
+
+struct DaydreamInternalParams {
+ public:
+ int32_t version;
+ std::vector<AlignmentMarker> alignment_markers;
+
+ PDX_SERIALIZABLE_MEMBERS(DaydreamInternalParams, version, alignment_markers);
+};
+
+struct ViewerParams {
+ public:
+ // TODO(hendrikw): Do we need viewer_vendor_name and viewer_model_name?
+ float screen_to_lens_distance;
+ float inter_lens_distance;
+ float screen_center_to_lens_distance;
+ std::vector<float> left_eye_field_of_view_angles;
+
+ enum VerticalAlignmentType : int32_t {
+ BOTTOM = 0, // phone rests against a fixed bottom tray
+ CENTER = 1, // phone screen assumed to be centered w.r.t. lenses
+ TOP = 2 // phone rests against a fixed top tray
+ };
+
+ enum EyeOrientation : int32_t {
+ kCCW0Degrees = 0,
+ kCCW90Degrees = 1,
+ kCCW180Degrees = 2,
+ kCCW270Degrees = 3,
+ kCCW0DegreesMirrored = 4,
+ kCCW90DegreesMirrored = 5,
+ kCCW180DegreesMirrored = 6,
+ kCCW270DegreesMirrored = 7
+ };
+
+ VerticalAlignmentType vertical_alignment;
+ std::vector<EyeOrientation> eye_orientations;
+
+ float tray_to_lens_distance;
+
+ std::vector<float> distortion_coefficients_r;
+ std::vector<float> distortion_coefficients_g;
+ std::vector<float> distortion_coefficients_b;
+
+ DaydreamInternalParams daydream_internal;
+
+ PDX_SERIALIZABLE_MEMBERS(ViewerParams, screen_to_lens_distance,
+ inter_lens_distance, screen_center_to_lens_distance,
+ left_eye_field_of_view_angles, vertical_alignment,
+ eye_orientations, tray_to_lens_distance,
+ distortion_coefficients_r, distortion_coefficients_g,
+ distortion_coefficients_b, daydream_internal);
+};
+
+struct DisplayRPC {
+ // Service path.
+ static constexpr char kClientPath[] = "system/display/client";
+
+ // Op codes.
+ enum {
+ kOpGetMetrics = 0,
+ kOpGetEdsCapture,
+ kOpCreateSurface,
+ kOpAllocateBuffer,
+ kOpSetAttributes,
+ kOpGetMetadataBuffer,
+ kOpCreateVideoMeshSurface,
+ kOpVideoMeshSurfaceCreateProducerQueue,
+ kOpEnterVrMode,
+ kOpExitVrMode,
+ kOpSetViewerParams
+ };
+
+ // Aliases.
+ using ByteBuffer = pdx::rpc::BufferWrapper<std::vector<uint8_t>>;
+ using LocalChannelHandle = pdx::LocalChannelHandle;
+ using Void = pdx::rpc::Void;
+
+ // Methods.
+ PDX_REMOTE_METHOD(GetMetrics, kOpGetMetrics, SystemDisplayMetrics(Void));
+ PDX_REMOTE_METHOD(GetEdsCapture, kOpGetEdsCapture, ByteBuffer(Void));
+ PDX_REMOTE_METHOD(CreateSurface, kOpCreateSurface,
+ int(int width, int height, int format, int usage,
+ DisplaySurfaceFlags flags));
+ PDX_REMOTE_METHOD(AllocateBuffer, kOpAllocateBuffer,
+ std::pair<std::uint32_t, LocalChannelHandle>(Void));
+ PDX_REMOTE_METHOD(SetAttributes, kOpSetAttributes,
+ int(const DisplaySurfaceAttributes& attributes));
+ PDX_REMOTE_METHOD(GetMetadataBuffer, kOpGetMetadataBuffer,
+ LocalChannelHandle(Void));
+ // VideoMeshSurface methods
+ PDX_REMOTE_METHOD(CreateVideoMeshSurface, kOpCreateVideoMeshSurface,
+ LocalChannelHandle(Void));
+ PDX_REMOTE_METHOD(VideoMeshSurfaceCreateProducerQueue,
+ kOpVideoMeshSurfaceCreateProducerQueue,
+ LocalChannelHandle(Void));
+ PDX_REMOTE_METHOD(EnterVrMode, kOpEnterVrMode, int(Void));
+ PDX_REMOTE_METHOD(ExitVrMode, kOpExitVrMode, int(Void));
+ PDX_REMOTE_METHOD(SetViewerParams, kOpSetViewerParams,
+ void(const ViewerParams& viewer_params));
+};
+
+struct DisplayManagerRPC {
+ // Service path.
+ static constexpr char kClientPath[] = "system/display/manager";
+
+ // Op codes.
+ enum {
+ kOpGetSurfaceList = 0,
+ kOpGetSurfaceBuffers,
+ kOpUpdateSurfaces,
+ };
+
+ // Aliases.
+ using LocalChannelHandle = pdx::LocalChannelHandle;
+ using Void = pdx::rpc::Void;
+
+ // Methods.
+ PDX_REMOTE_METHOD(GetSurfaceList, kOpGetSurfaceList,
+ std::vector<DisplaySurfaceInfo>(Void));
+ PDX_REMOTE_METHOD(GetSurfaceBuffers, kOpGetSurfaceBuffers,
+ std::vector<LocalChannelHandle>(int surface_id));
+ PDX_REMOTE_METHOD(
+ UpdateSurfaces, kOpUpdateSurfaces,
+ int(const std::map<int, DisplaySurfaceAttributes>& updates));
+};
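+
+// Usage sketch for a display manager client (assumes the InvokeRemoteMethod
+// helper from pdx/rpc/remote_method.h; |surface_id| is illustrative):
+//   std::map<int, DisplaySurfaceAttributes> updates;
+//   updates[surface_id][DisplaySurfaceAttributeEnum::Blur] =
+//       DisplaySurfaceAttributeValue{1.0f};
+//   InvokeRemoteMethod<DisplayManagerRPC::UpdateSurfaces>(updates);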
+
+struct ScreenshotData {
+ int width;
+ int height;
+ std::vector<uint8_t> buffer;
+
+ private:
+ PDX_SERIALIZABLE_MEMBERS(ScreenshotData, width, height, buffer);
+};
+
+struct DisplayScreenshotRPC {
+ // Service path.
+ static constexpr char kClientPath[] = "system/display/screenshot";
+
+ // Op codes.
+ enum {
+ kOpGetFormat = 0,
+ kOpTakeScreenshot,
+ };
+
+ using Void = pdx::rpc::Void;
+
+ PDX_REMOTE_METHOD(GetFormat, kOpGetFormat, int(Void));
+ PDX_REMOTE_METHOD(TakeScreenshot, kOpTakeScreenshot,
+ ScreenshotData(int layer_index));
+};
+
+struct VSyncSchedInfo {
+ int64_t vsync_period_ns;
+ int64_t timestamp_ns;
+ uint32_t next_vsync_count;
+
+ private:
+ PDX_SERIALIZABLE_MEMBERS(VSyncSchedInfo, vsync_period_ns, timestamp_ns,
+ next_vsync_count);
+};
+
+struct DisplayVSyncRPC {
+ // Service path.
+ static constexpr char kClientPath[] = "system/display/vsync";
+
+ // Op codes.
+ enum {
+ kOpWait = 0,
+ kOpAck,
+ kOpGetLastTimestamp,
+ kOpGetSchedInfo,
+ kOpAcknowledge,
+ };
+
+ // Aliases.
+ using Void = pdx::rpc::Void;
+ using Timestamp = int64_t;
+
+ // Methods.
+ PDX_REMOTE_METHOD(Wait, kOpWait, Timestamp(Void));
+ PDX_REMOTE_METHOD(GetLastTimestamp, kOpGetLastTimestamp, Timestamp(Void));
+ PDX_REMOTE_METHOD(GetSchedInfo, kOpGetSchedInfo, VSyncSchedInfo(Void));
+ PDX_REMOTE_METHOD(Acknowledge, kOpAcknowledge, int(Void));
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_DISPLAY_RPC_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/display_types.h b/libs/vr/libdisplay/include/private/dvr/display_types.h
new file mode 100644
index 0000000..2bd02bd
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/display_types.h
@@ -0,0 +1,83 @@
+#ifndef ANDROID_DVR_DISPLAY_TYPES_H_
+#define ANDROID_DVR_DISPLAY_TYPES_H_
+
+#ifdef __ARM_NEON
+#include <arm_neon.h>
+#else
+#ifndef __FLOAT32X4T_86
+#define __FLOAT32X4T_86
+typedef float float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t;
+#endif
+#endif
+
+#include <cutils/native_handle.h>
+
+// DVR display-related data types.
+
+enum dvr_display_surface_type {
+ // Normal display surface meant to be used by applications' GL context to
+ // render into.
+ DVR_SURFACE_TYPE_NORMAL = 0,
+
+ // VideoMeshSurface is used to composite video frames into the 3D world.
+ DVR_SURFACE_TYPE_VIDEO_MESH,
+
+ // System overlay surface type. This is not currently in use.
+ DVR_SURFACE_TYPE_OVERLAY,
+};
+
+enum dvr_display_surface_flags {
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_EDS = (1 << 0),
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION = (1 << 1),
+ DVR_DISPLAY_SURFACE_FLAGS_VERTICAL_FLIP = (1 << 2),
+ DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2 = (1 << 3),
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC = (1 << 4),
+};
+
+enum dvr_display_surface_item_flags {
+ DVR_DISPLAY_SURFACE_ITEM_FLAGS_BUFFERS_CHANGED = (1 << 0),
+};
+
+enum dvr_display_surface_attribute {
+ DVR_DISPLAY_SURFACE_ATTRIBUTE_Z_ORDER = (1<<0),
+ DVR_DISPLAY_SURFACE_ATTRIBUTE_VISIBLE = (1<<1),
+ DVR_DISPLAY_SURFACE_ATTRIBUTE_BLUR = (1<<2),
+ DVR_DISPLAY_SURFACE_ATTRIBUTE_EXCLUDE_FROM_BLUR = (1<<3),
+ DVR_DISPLAY_SURFACE_ATTRIBUTE_BLUR_BEHIND = (1<<4),
+};
+
+// Maximum number of buffers for a surface. Each buffer represents a single
+// frame and may actually be a buffer array if multiview rendering is in use.
+// Defined as a macro so that it can also be used in shader code.
+#define kSurfaceBufferMaxCount 4
+
+// Maximum number of views per surface. Each eye is a view, for example.
+#define kSurfaceViewMaxCount 4
+
+namespace android {
+namespace dvr {
+
+struct __attribute__((packed, aligned(16))) DisplaySurfaceMetadata {
+ // Array of orientations and translations corresponding with surface buffers.
+ // The index is associated with each allocated buffer by DisplaySurface and
+ // communicated to clients.
+ // The maximum number of buffers is hard coded here as 4 so that we can bind
+ // this data structure in GPU shaders.
+ float32x4_t orientation[kSurfaceBufferMaxCount];
+ float32x4_t translation[kSurfaceBufferMaxCount];
+};
+
+struct __attribute__((packed, aligned(16))) VideoMeshSurfaceMetadata {
+ // Array of transform matrices corresponding with surface buffers.
+  // Note that the index is associated with each allocated buffer by
+  // DisplaySurface rather than VideoMeshSurface, because the metadata here is
+  // interpreted as the video mesh's transformation in each application's
+  // rendering frame.
+ float32x4x4_t transform[4][2];
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_DISPLAY_TYPES_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h b/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h
new file mode 100644
index 0000000..b03eeaa
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/dummy_native_window.h
@@ -0,0 +1,30 @@
+#ifndef ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
+#define ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
+
+#include <android/native_window.h>
+#include <ui/ANativeObjectBase.h>
+
+namespace android {
+namespace dvr {
+
+// DummyNativeWindow is an implementation of ANativeWindow that is
+// essentially empty and is used as a surface placeholder during context
+// creation for contexts that we don't intend to call eglSwapBuffers on.
+class DummyNativeWindow
+ : public ANativeObjectBase<ANativeWindow, DummyNativeWindow,
+ LightRefBase<DummyNativeWindow> > {
+ public:
+ DummyNativeWindow();
+
+ private:
+ static int Query(const ANativeWindow* window, int what, int* value);
+ static int Perform(ANativeWindow* window, int operation, ...);
+
+ DummyNativeWindow(const DummyNativeWindow&) = delete;
+  void operator=(const DummyNativeWindow&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_DUMMY_NATIVE_WINDOW_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/frame_history.h b/libs/vr/libdisplay/include/private/dvr/frame_history.h
new file mode 100644
index 0000000..53e0717
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/frame_history.h
@@ -0,0 +1,71 @@
+#ifndef ANDROID_DVR_FRAME_HISTORY_H_
+#define ANDROID_DVR_FRAME_HISTORY_H_
+
+#include <dvr/graphics.h>
+#include <pdx/file_handle.h>
+#include <private/dvr/ring_buffer.h>
+
+namespace android {
+namespace dvr {
+
+// FrameHistory tracks frame times from the start of rendering commands to when
+// the buffer is ready.
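+//
+// Typical per-frame usage (sketch; variable names are illustrative):
+//   history.OnFrameStart(scheduled_vsync, scheduled_finish_ns);
+//   ... issue rendering commands ...
+//   history.OnFrameSubmit(std::move(fence));
+//   history.CheckForFinishedFrames();
+//   int intervals = history.PredictNextFrameVsyncInterval(vsync_period_ns);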
+class FrameHistory {
+ public:
+ FrameHistory();
+ explicit FrameHistory(int pending_frame_buffer_size);
+
+ void Reset(int pending_frame_buffer_size);
+
+ // Call when starting rendering commands (i.e. dvrBeginRenderFrame).
+ void OnFrameStart(uint32_t scheduled_vsync, int64_t scheduled_finish_ns);
+
+ // Call when rendering commands are finished (i.e. dvrPresent).
+ void OnFrameSubmit(android::pdx::LocalHandle&& fence);
+
+ // Call once per frame to see if any pending frames have finished.
+ void CheckForFinishedFrames();
+
+ // Uses the recently completed frame render times to predict how long the next
+ // frame will take, in vsync intervals. For example if the predicted frame
+ // time is 10ms and the vsync interval is 11ms, this will return 1. If the
+ // predicted frame time is 12ms and the vsync interval is 11ms, this will
+ // return 2.
+ int PredictNextFrameVsyncInterval(int64_t vsync_period_ns) const;
+
+ // Returns results for recently completed frames. Each frame's result is
+ // returned only once.
+ int GetPreviousFrameResults(DvrFrameScheduleResult* results,
+ int result_count);
+
+ // Gets the vsync count for the most recently started frame. If there are no
+ // started frames this will return UINT32_MAX.
+ uint32_t GetCurrentFrameVsync() const;
+
+ private:
+ struct PendingFrame {
+ int64_t start_ns;
+ uint32_t scheduled_vsync;
+ int64_t scheduled_finish_ns;
+ android::pdx::LocalHandle fence;
+
+ PendingFrame();
+ PendingFrame(int64_t start_ns, uint32_t scheduled_vsync,
+ int64_t scheduled_finish_ns,
+ android::pdx::LocalHandle&& fence);
+
+ PendingFrame(PendingFrame&&) = default;
+ PendingFrame& operator=(PendingFrame&&) = default;
+ PendingFrame(const PendingFrame&) = delete;
+ PendingFrame& operator=(const PendingFrame&) = delete;
+ };
+
+ RingBuffer<PendingFrame> pending_frames_;
+ RingBuffer<DvrFrameScheduleResult> finished_frames_;
+ RingBuffer<int64_t> frame_duration_history_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_FRAME_HISTORY_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h b/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h
new file mode 100644
index 0000000..1d75335
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/gl_fenced_flush.h
@@ -0,0 +1,17 @@
+#ifndef ANDROID_DVR_GL_FENCED_FLUSH_H_
+#define ANDROID_DVR_GL_FENCED_FLUSH_H_
+
+#include <EGL/egl.h>
+#include <pdx/file_handle.h>
+
+namespace android {
+namespace dvr {
+
+// Creates an EGL_SYNC_NATIVE_FENCE_ANDROID and flushes. Returns the fence as a
+// file descriptor.
+pdx::LocalHandle CreateGLSyncAndFlush(EGLDisplay display);
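+
+// Example (sketch): after submitting the GL commands for a frame:
+//   pdx::LocalHandle fence = CreateGLSyncAndFlush(eglGetCurrentDisplay());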
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_GL_FENCED_FLUSH_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/graphics_private.h b/libs/vr/libdisplay/include/private/dvr/graphics_private.h
new file mode 100644
index 0000000..57c99da
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/graphics_private.h
@@ -0,0 +1,39 @@
+#ifndef ANDROID_DVR_GRAPHICS_PRIVATE_H_
+#define ANDROID_DVR_GRAPHICS_PRIVATE_H_
+
+#ifdef __ARM_NEON
+#include <arm_neon.h>
+#else
+#ifndef __FLOAT32X4T_86
+#define __FLOAT32X4T_86
+typedef float float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t;
+#endif
+#endif
+
+#include <sys/cdefs.h>
+
+#include <dvr/graphics.h>
+
+__BEGIN_DECLS
+
+// Sets the pose used by the system for EDS. If dvrBeginRenderFrameEds() or
+// dvrBeginRenderFrameLateLatch() are called instead of dvrBeginRenderFrame()
+// it's not necessary to call this function. If this function is used, the call
+// must be made after dvrBeginRenderFrame() and before dvrPresent().
+//
+// @param[in] graphics_context The DvrGraphicsContext.
+// @param[in] render_pose_orientation Head pose orientation that rendering for
+// this frame will be based off of. This must be an unmodified value
+// from DvrPoseAsync, returned by dvrPoseGet.
+// @param[in] render_pose_translation Head pose translation that rendering for
+// this frame will be based off of. This must be an unmodified value
+// from DvrPoseAsync, returned by dvrPoseGet.
+// @return 0 on success or a negative error code on failure.
+int dvrSetEdsPose(DvrGraphicsContext* graphics_context,
+ float32x4_t render_pose_orientation,
+ float32x4_t render_pose_translation);
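+
+// Example call sequence (sketch; dvrPoseGet and the DvrPoseAsync field names
+// are assumed from dvr/pose_client.h):
+//   dvrBeginRenderFrame(graphics_context);
+//   DvrPoseAsync pose;
+//   dvrPoseGet(pose_client, vsync_count, &pose);
+//   dvrSetEdsPose(graphics_context, pose.orientation, pose.translation);
+//   ... issue draw calls ...
+//   dvrPresent(graphics_context);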
+
+__END_DECLS
+
+#endif // ANDROID_DVR_GRAPHICS_PRIVATE_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/late_latch.h b/libs/vr/libdisplay/include/private/dvr/late_latch.h
new file mode 100644
index 0000000..d0eff51
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/late_latch.h
@@ -0,0 +1,186 @@
+#ifndef ANDROID_DVR_LATE_LATCH_H_
+#define ANDROID_DVR_LATE_LATCH_H_
+
+#include <atomic>
+#include <thread>
+#include <vector>
+
+#include <dvr/pose_client.h>
+#include <pdx/file_handle.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/graphics/shader_program.h>
+#include <private/dvr/graphics/vr_gl_extensions.h>
+#include <private/dvr/types.h>
+
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+// Input data for late latch compute shader.
+struct LateLatchInput {
+ // For app late latch:
+ mat4 eye_from_head_mat[kSurfaceViewMaxCount];
+ mat4 proj_mat[kSurfaceViewMaxCount];
+ mat4 pose_offset[kSurfaceViewMaxCount];
+ // For EDS late latch only:
+ mat4 eds_mat1[kSurfaceViewMaxCount];
+ mat4 eds_mat2[kSurfaceViewMaxCount];
+ // For both app and EDS late latch:
+ uint32_t pose_index;
+ uint32_t render_pose_index;
+};
+
+// Output data for late latch shader. The application can use all or part of
+// this data by calling LateLatch::BindUniformBuffer.
+// This struct matches the layout of DvrGraphicsLateLatchData.
+struct LateLatchOutput {
+ mat4 view_proj_matrix[kSurfaceViewMaxCount];
+ mat4 view_matrix[kSurfaceViewMaxCount];
+ vec4 pose_quaternion;
+ vec4 pose_translation;
+};
+
+// LateLatch provides a facility for GL workloads to acquire a late-adjusted
+// model-view projection matrix, adjusted based on the position/quaternion pose
+// read from a buffer that is being written to asynchronously. The adjusted
+// MVP matrix is written to a GL buffer object via GL transform feedback.
+class LateLatch {
+ public:
+ enum BufferType {
+ kViewProjMatrix,
+ kViewMatrix,
+ kPoseQuaternion,
+ kPoseTranslation,
+ // Max transform feedback count is 4, so no more buffers can go here.
+ kNumBuffers,
+ };
+
+ static size_t GetBufferSize(BufferType type) {
+ switch (type) {
+ default:
+ case kViewProjMatrix:
+ case kViewMatrix:
+ return 4 * 4 * sizeof(float);
+ case kPoseQuaternion:
+ case kPoseTranslation:
+ return 4 * sizeof(float);
+ }
+ }
+
+ static size_t GetBufferOffset(BufferType type, int view) {
+ switch (type) {
+ default:
+ case kViewProjMatrix:
+ return offsetof(LateLatchOutput, view_proj_matrix) +
+ GetBufferSize(type) * view;
+ case kViewMatrix:
+ return offsetof(LateLatchOutput, view_matrix) +
+ GetBufferSize(type) * view;
+ case kPoseQuaternion:
+ return offsetof(LateLatchOutput, pose_quaternion);
+ case kPoseTranslation:
+ return offsetof(LateLatchOutput, pose_translation);
+ }
+ }
+
+ explicit LateLatch(bool is_app_late_latch);
+ LateLatch(bool is_app_late_latch, pdx::LocalHandle&& surface_metadata_fd);
+ ~LateLatch();
+
+ // Bind the late-latch output data as a GL_UNIFORM_BUFFER. For example,
+ // to bind just the view_matrix from the output:
+ // BindUniformBuffer(BINDING, offsetof(LateLatchOutput, view_matrix),
+ // sizeof(mat4));
+ void BindUniformBuffer(GLuint ubo_binding, size_t offset, size_t size) const {
+ glBindBufferRange(GL_UNIFORM_BUFFER, ubo_binding, output_buffer_id_, offset,
+ size);
+ }
+
+ void BindUniformBuffer(GLuint ubo_binding, BufferType type, int view) const {
+ glBindBufferRange(GL_UNIFORM_BUFFER, ubo_binding, output_buffer_id_,
+ GetBufferOffset(type, view), GetBufferSize(type));
+ }
+
+ GLuint output_buffer_id() const { return output_buffer_id_; }
+
+ void UnbindUniformBuffer(GLuint ubo_binding) const {
+ glBindBufferBase(GL_UNIFORM_BUFFER, ubo_binding, 0);
+ }
+
+ void CaptureOutputData(LateLatchOutput* data) const;
+
+ // Add the late latch GL commands for this frame. This should be done just
+  // before the first application draw calls that are dependent on the latest
+  // head pose.
+ //
+ // For efficiency, the application projection and eye_from_head matrices are
+ // passed through the late latch shader and output in various combinations to
+ // allow for both simple application vertex shaders that can take the view-
+ // projection matrix as-is and shaders that need to access the view matrix
+ // separately.
+ //
+ // GL state must be reset to default for this call.
+ void AddLateLatch(const LateLatchInput& data) const;
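+  //
+  // Example (sketch; |binding| and |view| are illustrative):
+  //   LateLatchInput input = {};
+  //   ... fill matrices, pose_index and render_pose_index ...
+  //   late_latch.AddLateLatch(input);
+  //   late_latch.BindUniformBuffer(binding, LateLatch::kViewProjMatrix, view);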
+
+ // After calling AddEdsLateLatch one or more times, this method must be called
+ // to add the necessary GL memory barrier to ensure late latch outputs are
+ // written before the EDS and warp shaders read them.
+ void PostEdsLateLatchBarrier() const {
+ // The transform feedback buffer is going to be read as a uniform by EDS,
+ // so we need a uniform memory barrier.
+ glMemoryBarrier(GL_UNIFORM_BARRIER_BIT);
+ }
+
+ // Typically not for use by application code. This method adds the EDS late
+ // latch that will adjust the application framebuffer with the latest head
+ // pose.
+ void AddEdsLateLatch(const LateLatchInput& data,
+ GLuint render_pose_buffer_object) const;
+
+ // For debugging purposes, capture the output during the next call to
+ // AddLateLatch. Set to NULL to reset.
+ void SetLateLatchDataCapture(LateLatchOutput* app_late_latch) {
+ app_late_latch_output_ = app_late_latch;
+ }
+
+ // For debugging purposes, capture the output during the next call to
+ // AddEdsLateLatch. Set to NULL to reset.
+ void SetEdsLateLatchDataCapture(LateLatchOutput* eds_late_latch) {
+ eds_late_latch_output_ = eds_late_latch;
+ }
+
+ private:
+ LateLatch(const LateLatch&) = delete;
+ LateLatch& operator=(const LateLatch&) = delete;
+
+ void LoadLateLatchShader();
+
+ // Late latch shader.
+ ShaderProgram late_latch_program_;
+
+ // Async pose ring buffer object.
+ GLuint pose_buffer_object_;
+
+ GLuint metadata_buffer_id_;
+
+ // Pose matrix buffers
+ GLuint input_buffer_id_;
+ GLuint output_buffer_id_;
+
+ bool is_app_late_latch_;
+ // During development, these can be used to capture the pose output data.
+ LateLatchOutput* app_late_latch_output_;
+ LateLatchOutput* eds_late_latch_output_;
+
+ DvrPose* pose_client_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_LATE_LATCH_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h b/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h
new file mode 100644
index 0000000..87e9c9f
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/native_buffer_queue.h
@@ -0,0 +1,73 @@
+#ifndef ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
+#define ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
+
+#include <semaphore.h>
+
+#include <mutex>
+#include <vector>
+
+#include <private/dvr/native_buffer.h>
+#include <private/dvr/ring_buffer.h>
+
+#include "display_client.h"
+
+namespace android {
+namespace dvr {
+
+// NativeBufferQueue manages a queue of NativeBufferProducers allocated from a
+// DisplaySurfaceClient. Buffers are automatically re-enqueued when released by
+// the consumer side.
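+//
+// Typical usage (sketch; the capacity value is illustrative):
+//   NativeBufferQueue queue(surface, /*capacity=*/4);
+//   NativeBufferProducer* buffer = queue.Dequeue();  // Blocks until free.
+//   ... render into and post |buffer|; it is re-enqueued when released ...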
+class NativeBufferQueue {
+ public:
+ // Create a queue with the given number of free buffers.
+ NativeBufferQueue(const std::shared_ptr<DisplaySurfaceClient>& surface,
+ size_t capacity);
+ NativeBufferQueue(EGLDisplay display,
+ const std::shared_ptr<DisplaySurfaceClient>& surface,
+ size_t capacity);
+ ~NativeBufferQueue();
+
+ std::shared_ptr<DisplaySurfaceClient> surface() const { return surface_; }
+
+ // Dequeue a buffer from the free queue, blocking until one is available.
+ NativeBufferProducer* Dequeue();
+
+ // Enqueue a buffer at the end of the free queue.
+ void Enqueue(NativeBufferProducer* buf);
+
+ // Get the number of free buffers in the queue.
+ size_t GetFreeBufferCount() const;
+
+ // Get the total number of buffers managed by this queue.
+ size_t GetQueueCapacity() const;
+
+ // Accessors for display surface buffer attributes.
+ int width() const { return surface_->width(); }
+ int height() const { return surface_->height(); }
+ int format() const { return surface_->format(); }
+ int usage() const { return surface_->usage(); }
+
+ private:
+ // Wait for buffers to be released and enqueue them.
+ bool WaitForBuffers();
+
+ std::shared_ptr<DisplaySurfaceClient> surface_;
+
+ // A list of strong pointers to the buffers, used for managing buffer
+ // lifetime.
+ std::vector<android::sp<NativeBufferProducer>> buffers_;
+
+ // Used to implement queue semantics.
+ RingBuffer<NativeBufferProducer*> buffer_queue_;
+
+ // Epoll fd used to wait for BufferHub events.
+ int epoll_fd_;
+
+ NativeBufferQueue(const NativeBufferQueue&) = delete;
+  void operator=(const NativeBufferQueue&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_NATIVE_BUFFER_QUEUE_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/screenshot_client.h b/libs/vr/libdisplay/include/private/dvr/screenshot_client.h
new file mode 100644
index 0000000..b6fc859
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/screenshot_client.h
@@ -0,0 +1,42 @@
+#ifndef ANDROID_DVR_SCREENSHOT_CLIENT_H_
+#define ANDROID_DVR_SCREENSHOT_CLIENT_H_
+
+#include <memory>
+#include <vector>
+
+#include <pdx/client.h>
+#include <private/dvr/display_rpc.h>
+#include <system/graphics.h>
+
+namespace android {
+namespace dvr {
+
+// Represents a connection to the screenshot service, which allows capturing an
+// upcoming frame as it is being rendered to the display.
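+//
+// Example (sketch; Create() is provided by pdx::ClientBase):
+//   auto screenshot = ScreenshotClient::Create();
+//   std::vector<uint8_t> data;
+//   int width = 0, height = 0;
+//   if (screenshot && screenshot->Take(&data, 1, &width, &height) == 0)
+//     ... |data| holds width x height pixels in format() ...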
+class ScreenshotClient : public pdx::ClientBase<ScreenshotClient> {
+ public:
+ int format() const { return format_; }
+
+ // Attempts to take a screenshot. If successful, sets *data to the contents
+ // of the screenshot and returns zero. Otherwise, returns a negative error
+ // code.
+  // |index| selects which buffer layer to capture; it corresponds to the
+  // layer_index parameter of DisplayScreenshotRPC::TakeScreenshot.
+ int Take(std::vector<uint8_t>* data, int index, int* return_width,
+ int* return_height);
+
+ private:
+ friend BASE;
+
+ ScreenshotClient();
+
+ // Layout information for screenshots.
+ int format_;
+
+ ScreenshotClient(const ScreenshotClient&) = delete;
+ void operator=(const ScreenshotClient&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SCREENSHOT_CLIENT_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h b/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h
new file mode 100644
index 0000000..a2659a6
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/video_mesh_surface_client.h
@@ -0,0 +1,42 @@
+#ifndef ANDROID_DVR_VIDEO_MESH_SURFACE_CLIENT_H_
+#define ANDROID_DVR_VIDEO_MESH_SURFACE_CLIENT_H_
+
+#include <base/macros.h>
+#include <private/dvr/buffer_hub_queue_client.h>
+#include <private/dvr/display_client.h>
+
+namespace android {
+namespace dvr {
+
+class VideoMeshSurfaceClient
+ : pdx::ClientBase<VideoMeshSurfaceClient, SurfaceClient> {
+ public:
+ using LocalChannelHandle = pdx::LocalChannelHandle;
+
+ // This call assumes ownership of |handle|.
+ static std::unique_ptr<VideoMeshSurfaceClient> Import(
+ LocalChannelHandle handle);
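+
+  // Example (sketch): |handle| typically comes from
+  // DisplayRPC::CreateVideoMeshSurface:
+  //   auto surface = VideoMeshSurfaceClient::Import(std::move(handle));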
+
+ std::shared_ptr<ProducerQueue> GetProducerQueue();
+
+ // Get the shared memory metadata buffer for this video mesh surface. If it is
+ // not yet allocated, this will allocate it.
+ volatile VideoMeshSurfaceMetadata* GetMetadataBufferPtr();
+
+ private:
+ friend BASE;
+
+ std::shared_ptr<android::dvr::ProducerQueue> producer_queue_;
+ VideoMeshSurfaceMetadata* mapped_metadata_buffer_;
+
+ explicit VideoMeshSurfaceClient(LocalChannelHandle handle);
+};
+
+} // namespace dvr
+} // namespace android
+
+struct DvrVideoMeshSurface {
+ std::shared_ptr<android::dvr::VideoMeshSurfaceClient> client;
+};
+
+#endif // ANDROID_DVR_VIDEO_MESH_SURFACE_CLIENT_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/vsync_client.h b/libs/vr/libdisplay/include/private/dvr/vsync_client.h
new file mode 100644
index 0000000..32fa40f
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/vsync_client.h
@@ -0,0 +1,70 @@
+#ifndef ANDROID_DVR_VSYNC_CLIENT_H_
+#define ANDROID_DVR_VSYNC_CLIENT_H_
+
+#include <stdint.h>
+
+#include <pdx/client.h>
+#include <private/dvr/vsync_client_api.h>
+
+struct dvr_vsync_client {};
+
+namespace android {
+namespace dvr {
+
+/*
+ * VSyncClient is a remote interface to the vsync service in displayd.
+ * This class is used to wait for and retrieve information about the
+ * display vsync.
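+ *
+ * Example (sketch; Create() is provided by pdx::ClientBase):
+ *   auto client = VSyncClient::Create();
+ *   int64_t timestamp_ns = 0;
+ *   if (client && client->Wait(&timestamp_ns) == 0)
+ *     timestamp_ns now holds the time of the vsync that just occurred.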
+ */
+class VSyncClient : public pdx::ClientBase<VSyncClient>,
+ public dvr_vsync_client {
+ public:
+ /*
+ * Wait for the next vsync signal.
+ * The timestamp (in ns) is written into *ts when ts is non-NULL.
+ */
+ int Wait(int64_t* timestamp_ns);
+
+ /*
+ * Returns the file descriptor used to communicate with the vsync system
+ * service or -1 on error.
+ */
+ int GetFd();
+
+ /*
+ * Clears the select/poll/epoll event so that subsequent calls to
+ * these will not signal until the next vsync.
+ */
+ int Acknowledge();
+
+ /*
+ * Get the timestamp of the last vsync event in ns. This call has
+ * the same side effect on events as Acknowledge(), which saves
+ * an IPC message.
+ */
+ int GetLastTimestamp(int64_t* timestamp_ns);
+
+ /*
+ * Get vsync scheduling info.
+ * Get the estimated timestamp of the next GPU lens warp preemption event in
+ * ns. Also returns the corresponding vsync count that the next lens warp
+ * operation will target. This call has the same side effect on events as
+ * Acknowledge(), which saves an IPC message.
+ */
+ int GetSchedInfo(int64_t* vsync_period_ns, int64_t* next_timestamp_ns,
+ uint32_t* next_vsync_count);
+
+ private:
+ friend BASE;
+
+ VSyncClient();
+ explicit VSyncClient(long timeout_ms);
+
+ VSyncClient(const VSyncClient&) = delete;
+ void operator=(const VSyncClient&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_VSYNC_CLIENT_H_
diff --git a/libs/vr/libdisplay/include/private/dvr/vsync_client_api.h b/libs/vr/libdisplay/include/private/dvr/vsync_client_api.h
new file mode 100644
index 0000000..4cdbc71
--- /dev/null
+++ b/libs/vr/libdisplay/include/private/dvr/vsync_client_api.h
@@ -0,0 +1,44 @@
+#ifndef ANDROID_DVR_VSYNC_CLIENT_API_H_
+#define ANDROID_DVR_VSYNC_CLIENT_API_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// A client of the vsync service.
+//
+// The "dvr_vsync_client" structure wraps a client connection to the
+// system vsync service. It is used to synchronize application drawing
+// with the scanout of the display.
+typedef struct dvr_vsync_client dvr_vsync_client;
+
+// Creates a new client to the system vsync service.
+dvr_vsync_client* dvr_vsync_client_create(void);
+
+// Destroys the vsync client.
+void dvr_vsync_client_destroy(dvr_vsync_client* client);
+
+// Blocks until the next vsync signal.
+// The timestamp (in ns) is written into |*timestamp_ns| when it is non-NULL.
+// Returns 0 upon success, or -errno.
+int dvr_vsync_client_wait(dvr_vsync_client* client, int64_t* timestamp_ns);
+
+// Returns the file descriptor used to communicate with the vsync service.
+int dvr_vsync_client_get_fd(dvr_vsync_client* client);
+
+// Clears the select/poll/epoll event so that subsequent calls to these
+// will not signal until the next vsync.
+int dvr_vsync_client_acknowledge(dvr_vsync_client* client);
+
+// Gets the timestamp of the last vsync signal in ns. This call has the
+// same side effects on events as acknowledge.
+int dvr_vsync_client_get_last_timestamp(dvr_vsync_client* client,
+ int64_t* timestamp_ns);
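+
+// Example wait loop (sketch):
+//   dvr_vsync_client* client = dvr_vsync_client_create();
+//   int64_t timestamp_ns = 0;
+//   while (dvr_vsync_client_wait(client, &timestamp_ns) == 0) {
+//     /* Schedule rendering for the frame following timestamp_ns. */
+//   }
+//   dvr_vsync_client_destroy(client);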
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // ANDROID_DVR_VSYNC_CLIENT_API_H_
diff --git a/libs/vr/libdisplay/late_latch.cpp b/libs/vr/libdisplay/late_latch.cpp
new file mode 100644
index 0000000..3681e10
--- /dev/null
+++ b/libs/vr/libdisplay/late_latch.cpp
@@ -0,0 +1,461 @@
+#include "include/private/dvr/late_latch.h"
+
+#include <unistd.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include <base/logging.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/debug.h>
+#include <private/dvr/graphics/gpu_profiler.h>
+#include <private/dvr/pose_client_internal.h>
+#include <private/dvr/sensor_constants.h>
+#include <private/dvr/types.h>
+
+#define PRINT_MATRIX 0
+
+#if PRINT_MATRIX
+#ifndef LOG_TAG
+#define LOG_TAG "latelatch"
+#endif
+#include <cutils/log.h>
+
+#define PE(str, ...) \
+ fprintf(stderr, "[%s:%d] " str, __FILE__, __LINE__, ##__VA_ARGS__); \
+ ALOGI("[%s:%d] " str, __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define PV4(v) PE(#v "=%f,%f,%f,%f\n", v[0], v[1], v[2], v[3]);
+#define PM4(m) \
+ PE(#m ":\n %f,%f,%f,%f\n %f,%f,%f,%f\n %f,%f,%f,%f\n %f,%f,%f,%f\n", \
+ m(0, 0), m(0, 1), m(0, 2), m(0, 3), m(1, 0), m(1, 1), m(1, 2), m(1, 3), \
+ m(2, 0), m(2, 1), m(2, 2), m(2, 3), m(3, 0), m(3, 1), m(3, 2), m(3, 3))
+#endif // PRINT_MATRIX
+
+#define STRINGIFY2(s) #s
+#define STRINGIFY(s) STRINGIFY2(s)
+
+// Compute shader bindings.
+// GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS must be at least 8 for GLES 3.1.
+#define POSE_BINDING 0
+#define RENDER_POSE_BINDING 1
+#define INPUT_BINDING 2
+#define OUTPUT_BINDING 3
+
+using android::pdx::LocalHandle;
+
+namespace {
+
+static const std::string kShaderLateLatch = R"( // NOLINT
+ struct Pose {
+ vec4 quat;
+ vec3 pos;
+ };
+
+ // Must match DvrPoseAsync C struct.
+ struct DvrPoseAsync {
+ vec4 orientation;
+ vec4 translation;
+ vec4 right_orientation;
+ vec4 right_translation;
+ vec4 angular_velocity;
+ vec4 velocity;
+ vec4 reserved[2];
+ };
+
+ // Must match LateLatchInputData C struct.
+ layout(binding = INPUT_BINDING, std140)
+ buffer InputData {
+ mat4 uEyeFromHeadMat[kSurfaceViewMaxCount];
+ mat4 uProjMat[kSurfaceViewMaxCount];
+ mat4 uPoseOffset[kSurfaceViewMaxCount];
+ mat4 uEdsMat1[kSurfaceViewMaxCount];
+ mat4 uEdsMat2[kSurfaceViewMaxCount];
+ uint uPoseIndex;
+ uint uRenderPoseIndex;
+ } bIn;
+
+  // std140 lays out the structure in a consistent, standard way so that we
+  // can access it from C++.
+ // This structure exactly matches the pose ring buffer in pose_client.h.
+ layout(binding = POSE_BINDING, std140)
+ buffer PoseBuffer {
+ DvrPoseAsync data[kPoseAsyncBufferTotalCount];
+ } bPose;
+
+ // Must stay in sync with DisplaySurfaceMetadata C struct.
+ // GPU thread 0 will exclusively read in a pose and capture it
+ // into this array.
+ layout(binding = RENDER_POSE_BINDING, std140)
+ buffer DisplaySurfaceMetadata {
+ vec4 orientation[kSurfaceBufferMaxCount];
+ vec4 translation[kSurfaceBufferMaxCount];
+ } bSurfaceData;
+
+  // Must stay in sync with the LateLatchOutput C struct.
+  // Each thread writes the output entry for one view.
+ layout(binding = OUTPUT_BINDING, std140)
+ buffer Output {
+ mat4 viewProjMatrix[kSurfaceViewMaxCount];
+ mat4 viewMatrix[kSurfaceViewMaxCount];
+ vec4 quaternion;
+ vec4 translation;
+ } bOut;
+
+ // Thread 0 will also store the single quat/pos pair in shared variables
+ // for the other threads to use (left and right eye in this array).
+ shared Pose sharedPose[2];
+
+ // Rotate v1 by the given quaternion. This is based on mathfu's
+ // Quaternion::Rotate function. It is the typical implementation of this
+ // operation. Eigen has a similar method (Quaternion::_transformVector) that
+ // supposedly requires fewer operations, but I am skeptical of optimizing
+ // shader code without proper profiling first.
+ vec3 rotate(vec4 quat, vec3 v1) {
+ float ss = 2.0 * quat.w;
+ vec3 v = quat.xyz;
+ return ss * cross(v, v1) + (ss * quat.w - 1.0) * v1 +
+ 2.0 * dot(v, v1) * v;
+ }
+
+  // See Eigen Quaternion::conjugate.
+  // Note that this isn't a true multiplicative inverse unless you can
+  // guarantee quat is normalized, but that typically isn't an issue for our
+  // purposes.
+ vec4 quatInvert(vec4 quat) {
+ return vec4(-quat.xyz, quat.w);
+ }
+
+ // This is based on mathfu's Quaternion::operator*(Quaternion)
+ // Eigen's version is mathematically equivalent, just notationally different.
+ vec4 quatMul(vec4 q1, vec4 q2) {
+ return vec4(q1.w * q2.xyz + q2.w * q1.xyz + cross(q1.xyz, q2.xyz),
+ q1.w * q2.w - dot(q1.xyz, q2.xyz));
+ }
+
+ // Equivalent to pose.h GetObjectFromReferenceMatrix.
+ mat4 getInverseMatrix(Pose pose) {
+ // Invert quaternion and store fields the way Eigen does so we can
+ // keep in sync with Eigen methods easier.
+ vec4 quatInv = quatInvert(pose.quat);
+ vec3 v = quatInv.xyz;
+ float s = quatInv.w;
+ // Convert quaternion to matrix. See Eigen Quaternion::toRotationMatrix()
+ float x2 = v.x * v.x, y2 = v.y * v.y, z2 = v.z * v.z;
+ float sx = s * v.x, sy = s * v.y, sz = s * v.z;
+ float xz = v.x * v.z, yz = v.y * v.z, xy = v.x * v.y;
+ // Inverse translation.
+ vec3 point = -pose.pos;
+
+ return
+ mat4(1.0 - 2.0 * (y2 + z2), 2.0 * (xy + sz), 2.0 * (xz - sy), 0.0,
+ 2.0 * (xy - sz), 1.0 - 2.0 * (x2 + z2), 2.0 * (sx + yz), 0.0,
+ 2.0 * (sy + xz), 2.0 * (yz - sx), 1.0 - 2.0 * (x2 + y2), 0.0,
+ 0.0, 0.0, 0.0, 1.0)*
+ mat4(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
+ point.x, point.y, point.z, 1.0);
+ }
+
+ void appLateLatch() {
+ uint poseIndex = (gl_LocalInvocationIndex & uint(1));
+ mat4 head_from_center = getInverseMatrix(sharedPose[poseIndex]);
+ bOut.viewMatrix[gl_LocalInvocationIndex] =
+ bIn.uEyeFromHeadMat[gl_LocalInvocationIndex] *
+ head_from_center * bIn.uPoseOffset[gl_LocalInvocationIndex];
+ bOut.viewProjMatrix[gl_LocalInvocationIndex] =
+ bIn.uProjMat[gl_LocalInvocationIndex] *
+ bOut.viewMatrix[gl_LocalInvocationIndex];
+ }
+
+ // Extract the app frame's pose.
+ Pose getPoseFromApp() {
+ Pose p;
+ p.quat = bSurfaceData.orientation[bIn.uRenderPoseIndex];
+ p.pos = bSurfaceData.translation[bIn.uRenderPoseIndex].xyz;
+ return p;
+ }
+
+ // See Posef::GetPoseOffset.
+ Pose getPoseOffset(Pose p1, Pose p2) {
+ Pose p;
+ p.quat = quatMul(quatInvert(p2.quat), p1.quat);
+ // TODO(jbates) Consider enabling positional EDS when it is better
+ // tested.
+ // p.pos = p2.pos - p1.pos;
+ p.pos = vec3(0.0);
+ return p;
+ }
+
+ void edsLateLatch() {
+ Pose pose1 = getPoseFromApp();
+ Pose correction;
+ // Ignore the texture pose if the quat is not unit-length.
+ float tex_quat_length = length(pose1.quat);
+ uint poseIndex = (gl_LocalInvocationIndex & uint(1));
+ if (abs(tex_quat_length - 1.0) < 0.001)
+ correction = getPoseOffset(pose1, sharedPose[poseIndex]);
+ else
+ correction = Pose(vec4(0, 0, 0, 1), vec3(0, 0, 0));
+ mat4 eye_old_from_eye_new_matrix = getInverseMatrix(correction);
+ bOut.viewProjMatrix[gl_LocalInvocationIndex] =
+ bIn.uEdsMat1[gl_LocalInvocationIndex] *
+ eye_old_from_eye_new_matrix * bIn.uEdsMat2[gl_LocalInvocationIndex];
+ // Currently unused, except for debugging:
+ bOut.viewMatrix[gl_LocalInvocationIndex] = eye_old_from_eye_new_matrix;
+ }
+
+ // One thread per surface view.
+ layout (local_size_x = kSurfaceViewMaxCount, local_size_y = 1,
+ local_size_z = 1) in;
+
+ void main() {
+ // First, thread 0 late latches pose and stores it into various places.
+ if (gl_LocalInvocationIndex == uint(0)) {
+ sharedPose[0].quat = bPose.data[bIn.uPoseIndex].orientation;
+ sharedPose[0].pos = bPose.data[bIn.uPoseIndex].translation.xyz;
+ sharedPose[1].quat = bPose.data[bIn.uPoseIndex].right_orientation;
+ sharedPose[1].pos = bPose.data[bIn.uPoseIndex].right_translation.xyz;
+ if (IS_APP_LATE_LATCH) {
+ bSurfaceData.orientation[bIn.uRenderPoseIndex] = sharedPose[0].quat;
+ bSurfaceData.translation[bIn.uRenderPoseIndex] = vec4(sharedPose[0].pos, 0.0);
+ // TODO(jbates) implement app late-latch support for separate eye poses.
+ // App late latch currently uses the same pose for both eye views.
+ sharedPose[1] = sharedPose[0];
+ }
+ bOut.quaternion = sharedPose[0].quat;
+ bOut.translation = vec4(sharedPose[0].pos, 0.0);
+ }
+
+ // Memory barrier to make sure all threads can see prior writes.
+ memoryBarrierShared();
+
+ // Execution barrier to block all threads here until all threads have
+ // reached this point -- ensures the late latching is done.
+ barrier();
+
+ if (IS_APP_LATE_LATCH)
+ appLateLatch();
+ else
+ edsLateLatch();
+ }
+)";
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+LateLatch::LateLatch(bool is_app_late_latch)
+ : LateLatch(is_app_late_latch, LocalHandle()) {}
+
+LateLatch::LateLatch(bool is_app_late_latch,
+ LocalHandle&& surface_metadata_fd)
+ : is_app_late_latch_(is_app_late_latch),
+ app_late_latch_output_(NULL),
+ eds_late_latch_output_(NULL) {
+ CHECK_GL();
+ glGenBuffers(1, &input_buffer_id_);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
+ glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(LateLatchInput), nullptr,
+ GL_DYNAMIC_DRAW);
+ glGenBuffers(1, &output_buffer_id_);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer_id_);
+ glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(LateLatchOutput), nullptr,
+ GL_DYNAMIC_COPY);
+ CHECK_GL();
+
+ LocalHandle pose_buffer_fd;
+ pose_client_ = dvrPoseCreate();
+ if (!pose_client_) {
+ LOG(ERROR) << "LateLatch Error: failed to create pose client";
+ } else {
+ int ret = privateDvrPoseGetRingBufferFd(pose_client_, &pose_buffer_fd);
+ if (ret < 0) {
+ LOG(ERROR) << "LateLatch Error: failed to get pose ring buffer";
+ }
+ }
+
+ glGenBuffers(1, &pose_buffer_object_);
+ glGenBuffers(1, &metadata_buffer_id_);
+ if (!glBindSharedBufferQCOM) {
+ LOG(ERROR) << "Error: Missing gralloc buffer extension, no pose data";
+ } else {
+ if (pose_buffer_fd) {
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, pose_buffer_object_);
+ glBindSharedBufferQCOM(GL_SHADER_STORAGE_BUFFER,
+ kPoseAsyncBufferTotalCount * sizeof(DvrPoseAsync),
+ pose_buffer_fd.Release());
+ }
+ CHECK_GL();
+ }
+
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, metadata_buffer_id_);
+ if (surface_metadata_fd && glBindSharedBufferQCOM) {
+ glBindSharedBufferQCOM(GL_SHADER_STORAGE_BUFFER,
+ sizeof(DisplaySurfaceMetadata),
+ surface_metadata_fd.Release());
+ } else {
+ // Fall back on internal metadata buffer when none provided, for example
+ // when distortion is done in the application process.
+ glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(DisplaySurfaceMetadata),
+ nullptr, GL_DYNAMIC_COPY);
+ }
+ CHECK_GL();
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
+
+ CHECK_GL();
+ LoadLateLatchShader();
+}
+
+LateLatch::~LateLatch() {
+ glDeleteBuffers(1, &metadata_buffer_id_);
+ glDeleteBuffers(1, &input_buffer_id_);
+ glDeleteBuffers(1, &output_buffer_id_);
+ glDeleteBuffers(1, &pose_buffer_object_);
+ dvrPoseDestroy(pose_client_);
+}
+
+void LateLatch::LoadLateLatchShader() {
+ std::string str;
+ str += "\n#define POSE_BINDING " STRINGIFY(POSE_BINDING);
+ str += "\n#define RENDER_POSE_BINDING " STRINGIFY(RENDER_POSE_BINDING);
+ str += "\n#define INPUT_BINDING " STRINGIFY(INPUT_BINDING);
+ str += "\n#define OUTPUT_BINDING " STRINGIFY(OUTPUT_BINDING);
+ str += "\n#define kPoseAsyncBufferTotalCount " STRINGIFY(
+ kPoseAsyncBufferTotalCount);
+ str += "\n#define kSurfaceBufferMaxCount " STRINGIFY(kSurfaceBufferMaxCount);
+ str += "\n#define kSurfaceBufferMaxCount " STRINGIFY(kSurfaceBufferMaxCount);
+ str += "\n#define kSurfaceViewMaxCount " STRINGIFY(kSurfaceViewMaxCount);
+ str += "\n#define IS_APP_LATE_LATCH ";
+ str += is_app_late_latch_ ? "true" : "false";
+ str += "\n";
+ str += kShaderLateLatch;
+ late_latch_program_.Link(str);
+ CHECK_GL();
+}
+
+void LateLatch::CaptureOutputData(LateLatchOutput* data) const {
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer_id_);
+ LateLatchOutput* out_data = static_cast<LateLatchOutput*>(glMapBufferRange(
+ GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchOutput), GL_MAP_READ_BIT));
+ *data = *out_data;
+ glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
+ CHECK_GL();
+}
+
+void LateLatch::AddLateLatch(const LateLatchInput& data) const {
+ CHECK(is_app_late_latch_);
+ CHECK_GL();
+ late_latch_program_.Use();
+
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING,
+ metadata_buffer_id_);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, pose_buffer_object_);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, output_buffer_id_);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
+ LateLatchInput* adata = (LateLatchInput*)glMapBufferRange(
+ GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchInput),
+ GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
+ if (adata)
+ *adata = data;
+ else
+ LOG(ERROR) << "Error: LateLatchInput gl mapping is null";
+ glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, input_buffer_id_);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
+ CHECK_GL();
+
+ // The output buffer is going to be written but it may be read by
+ // earlier shaders, so we need a shader storage memory barrier.
+  glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
+
+ glDispatchCompute(1, 1, 1);
+ CHECK_GL();
+
+ // The transform feedback buffer is going to be read as a uniform by the app,
+ // so we need a uniform memory barrier.
+ glMemoryBarrier(GL_UNIFORM_BARRIER_BIT);
+
+ if (app_late_latch_output_) {
+ // Capture the output data:
+ CaptureOutputData(app_late_latch_output_);
+ }
+#if PRINT_MATRIX
+ // Print the composed matrix to stderr:
+ LateLatchOutput out_data;
+ CaptureOutputData(&out_data);
+ CHECK_GL();
+ PE("LL APP slot:%d\n", data.render_pose_index);
+ PM4(data.proj_mat[0]);
+ PM4(out_data.view_proj_matrix[0]);
+ PM4(out_data.view_proj_matrix[1]);
+ PM4(out_data.view_proj_matrix[2]);
+ PM4(out_data.view_proj_matrix[3]);
+ PM4(out_data.view_matrix[0]);
+ PM4(out_data.view_matrix[1]);
+ PM4(out_data.view_matrix[2]);
+ PM4(out_data.view_matrix[3]);
+ PV4(out_data.pose_quaternion);
+ PV4(out_data.pose_translation);
+#endif
+
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, 0);
+ glUseProgram(0);
+}
+
+void LateLatch::AddEdsLateLatch(const LateLatchInput& data,
+ GLuint render_pose_buffer_object) const {
+ CHECK(!is_app_late_latch_);
+ late_latch_program_.Use();
+
+ // Fall back on internal buffer when none is provided.
+ if (!render_pose_buffer_object)
+ render_pose_buffer_object = metadata_buffer_id_;
+
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING,
+ render_pose_buffer_object);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, pose_buffer_object_);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, output_buffer_id_);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, input_buffer_id_);
+ LateLatchInput* adata = (LateLatchInput*)glMapBufferRange(
+ GL_SHADER_STORAGE_BUFFER, 0, sizeof(LateLatchInput),
+ GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
+  if (adata)
+    *adata = data;
+  else
+    LOG(ERROR) << "Error: LateLatchInput gl mapping is null";
+ glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
+ glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, input_buffer_id_);
+ CHECK_GL();
+
+ glDispatchCompute(1, 1, 1);
+ CHECK_GL();
+
+ if (eds_late_latch_output_) {
+ // Capture the output data:
+ CaptureOutputData(eds_late_latch_output_);
+ }
+#if PRINT_MATRIX
+ // Print the composed matrix to stderr:
+ LateLatchOutput out_data;
+ CaptureOutputData(&out_data);
+ CHECK_GL();
+ PE("LL EDS\n");
+ PM4(out_data.view_proj_matrix[0]);
+ PM4(out_data.view_matrix[0]);
+ PV4(out_data.pose_quaternion);
+ PV4(out_data.pose_translation);
+#endif
+
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, RENDER_POSE_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, POSE_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, OUTPUT_BINDING, 0);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, INPUT_BINDING, 0);
+ glUseProgram(0);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/native_buffer_queue.cpp b/libs/vr/libdisplay/native_buffer_queue.cpp
new file mode 100644
index 0000000..2d1e23d
--- /dev/null
+++ b/libs/vr/libdisplay/native_buffer_queue.cpp
@@ -0,0 +1,152 @@
+#include "include/private/dvr/native_buffer_queue.h"
+
+#include <base/logging.h>
+#include <cutils/log.h>
+#include <sys/epoll.h>
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <utils/Trace.h>
+
+#include <array>
+
+#include <private/dvr/display_types.h>
+
+namespace android {
+namespace dvr {
+
+NativeBufferQueue::NativeBufferQueue(
+ const std::shared_ptr<DisplaySurfaceClient>& surface, size_t capacity)
+ : NativeBufferQueue(nullptr, surface, capacity) {}
+
+NativeBufferQueue::NativeBufferQueue(
+ EGLDisplay display, const std::shared_ptr<DisplaySurfaceClient>& surface,
+ size_t capacity)
+    : surface_(surface),
+      buffer_queue_(capacity) {
+  CHECK(surface);
+  // Buffers are appended via push_back below, so reserve rather than
+  // constructing the vector with |capacity| null entries.
+  buffers_.reserve(capacity);
+
+ epoll_fd_ = epoll_create(64);
+ if (epoll_fd_ < 0) {
+ ALOGE("NativeBufferQueue::NativeBufferQueue: Failed to create epoll fd: %s",
+ strerror(errno));
+ return;
+ }
+
+  // kSurfaceBufferMaxCount must be >= capacity so that shader code can bind
+  // the surface buffer array data.
+ CHECK(kSurfaceBufferMaxCount >= capacity);
+
+ for (size_t i = 0; i < capacity; i++) {
+ uint32_t buffer_index = 0;
+ auto buffer = surface_->AllocateBuffer(&buffer_index);
+ if (!buffer) {
+ ALOGE("NativeBufferQueue::NativeBufferQueue: Failed to allocate buffer!");
+ return;
+ }
+
+ // TODO(jbates): store an index associated with each buffer so that we can
+ // determine which index in DisplaySurfaceMetadata it is associated
+ // with.
+ buffers_.push_back(new NativeBufferProducer(buffer, display, buffer_index));
+ NativeBufferProducer* native_buffer = buffers_.back().get();
+
+ epoll_event event = {.events = EPOLLIN | EPOLLET,
+ .data = {.ptr = native_buffer}};
+ if (epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, buffer->event_fd(), &event) <
+ 0) {
+ ALOGE(
+ "NativeBufferQueue::NativeBufferQueue: Failed to add buffer producer "
+ "to epoll set: %s",
+ strerror(errno));
+ return;
+ }
+
+ Enqueue(native_buffer);
+ }
+}
+
+NativeBufferQueue::~NativeBufferQueue() {
+ if (epoll_fd_ >= 0)
+ close(epoll_fd_);
+}
+
+bool NativeBufferQueue::WaitForBuffers() {
+ ATRACE_NAME("NativeBufferQueue::WaitForBuffers");
+ // Intentionally set this to one so that we don't waste time retrieving too
+ // many buffers.
+ constexpr size_t kMaxEvents = 1;
+ std::array<epoll_event, kMaxEvents> events;
+
+ while (buffer_queue_.IsEmpty()) {
+ int num_events = epoll_wait(epoll_fd_, events.data(), events.size(), -1);
+ if (num_events < 0 && errno != EINTR) {
+ ALOGE("NativeBufferQueue:WaitForBuffers: Failed to wait for buffers: %s",
+ strerror(errno));
+ return false;
+ }
+
+ ALOGD_IF(TRACE, "NativeBufferQueue::WaitForBuffers: num_events=%d",
+ num_events);
+
+ for (int i = 0; i < num_events; i++) {
+ NativeBufferProducer* buffer =
+ static_cast<NativeBufferProducer*>(events[i].data.ptr);
+ ALOGD_IF(TRACE,
+ "NativeBufferQueue::WaitForBuffers: event %d: buffer_id=%d "
+ "events=0x%x",
+ i, buffer->buffer()->id(), events[i].events);
+
+ if (events[i].events & EPOLLIN) {
+ const int ret = buffer->GainAsync();
+ if (ret < 0) {
+ ALOGE("NativeBufferQueue::WaitForBuffers: Failed to gain buffer: %s",
+ strerror(-ret));
+ continue;
+ }
+
+ Enqueue(buffer);
+ }
+ }
+ }
+
+ return true;
+}
+
+void NativeBufferQueue::Enqueue(NativeBufferProducer* buf) {
+ ATRACE_NAME("NativeBufferQueue::Enqueue");
+ if (buffer_queue_.IsFull()) {
+ ALOGE("NativeBufferQueue::Enqueue: Queue is full!");
+ return;
+ }
+
+ buffer_queue_.Append(buf);
+}
+
+NativeBufferProducer* NativeBufferQueue::Dequeue() {
+ ATRACE_NAME("NativeBufferQueue::Dequeue");
+ ALOGD_IF(TRACE, "NativeBufferQueue::Dequeue: count=%zd",
+ buffer_queue_.GetSize());
+
+ if (buffer_queue_.IsEmpty() && !WaitForBuffers())
+ return nullptr;
+
+ NativeBufferProducer* buf = buffer_queue_.Front();
+ buffer_queue_.PopFront();
+ if (buf == nullptr) {
+ ALOGE("NativeBufferQueue::Dequeue: Buffer at tail was nullptr!!!");
+ return nullptr;
+ }
+
+ return buf;
+}
+
+size_t NativeBufferQueue::GetFreeBufferCount() const {
+ return buffer_queue_.GetSize();
+}
+
+size_t NativeBufferQueue::GetQueueCapacity() const {
+ return buffer_queue_.GetCapacity();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/native_window.cpp b/libs/vr/libdisplay/native_window.cpp
new file mode 100644
index 0000000..63c81ed
--- /dev/null
+++ b/libs/vr/libdisplay/native_window.cpp
@@ -0,0 +1,459 @@
+#include <EGL/egl.h>
+
+#include <android/native_window.h>
+#include <base/logging.h>
+#include <cutils/native_handle.h>
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdarg.h>
+#include <string.h>
+#include <sys/timerfd.h>
+#include <system/window.h>
+#include <time.h>
+#include <ui/ANativeObjectBase.h>
+#include <utils/Errors.h>
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <utils/Trace.h>
+
+#include <cutils/log.h>
+
+#include <memory>
+#include <mutex>
+
+#include <dvr/graphics.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/display_client.h>
+#include <private/dvr/native_buffer.h>
+#include <private/dvr/native_buffer_queue.h>
+
+namespace {
+
+constexpr int kDefaultDisplaySurfaceUsage =
+ GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
+constexpr int kDefaultDisplaySurfaceFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+constexpr int kWarpedDisplaySurfaceFlags = 0;
+constexpr int kUnwarpedDisplaySurfaceFlags =
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_EDS |
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION |
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC;
+constexpr int kDefaultBufferCount = 4;
+
+} // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+// NativeWindow is an implementation of ANativeWindow. This class interacts with
+// displayd through the DisplaySurfaceClient and NativeBufferQueue.
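+//
+// A NativeWindow is typically wrapped in an EGLSurface (sketch; the cast to
+// EGLNativeWindowType is elided):
+//   android::sp<NativeWindow> window = new NativeWindow(surface);
+//   EGLSurface egl_surface =
+//       eglCreateWindowSurface(display, config, window.get(), nullptr);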
+class NativeWindow : public ANativeObjectBase<ANativeWindow, NativeWindow,
+ LightRefBase<NativeWindow> > {
+ public:
+ explicit NativeWindow(const std::shared_ptr<DisplaySurfaceClient>& surface);
+
+ void SetVisible(bool visible);
+ void SetZOrder(int z_order);
+ void PostEarly();
+
+ private:
+ friend class LightRefBase<NativeWindow>;
+
+ void Post(sp<NativeBufferProducer> buffer, int fence_fd);
+
+ static int SetSwapInterval(ANativeWindow* window, int interval);
+ static int DequeueBuffer(ANativeWindow* window, ANativeWindowBuffer** buffer,
+ int* fence_fd);
+ static int QueueBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
+ int fence_fd);
+ static int CancelBuffer(ANativeWindow* window, ANativeWindowBuffer* buffer,
+ int fence_fd);
+ static int Query(const ANativeWindow* window, int what, int* value);
+ static int Perform(ANativeWindow* window, int operation, ...);
+
+ static int DequeueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer** buffer);
+ static int CancelBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+ static int QueueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+ static int LockBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer);
+
+ std::shared_ptr<DisplaySurfaceClient> surface_;
+
+ std::mutex lock_;
+ NativeBufferQueue buffer_queue_;
+ sp<NativeBufferProducer> next_post_buffer_;
+ bool next_buffer_already_posted_;
+
+ NativeWindow(const NativeWindow&) = delete;
+  void operator=(const NativeWindow&) = delete;
+};
+
+NativeWindow::NativeWindow(const std::shared_ptr<DisplaySurfaceClient>& surface)
+ : surface_(surface),
+ buffer_queue_(surface, kDefaultBufferCount),
+ next_post_buffer_(nullptr),
+ next_buffer_already_posted_(false) {
+ ANativeWindow::setSwapInterval = SetSwapInterval;
+ ANativeWindow::dequeueBuffer = DequeueBuffer;
+ ANativeWindow::cancelBuffer = CancelBuffer;
+ ANativeWindow::queueBuffer = QueueBuffer;
+ ANativeWindow::query = Query;
+ ANativeWindow::perform = Perform;
+
+ ANativeWindow::dequeueBuffer_DEPRECATED = DequeueBuffer_DEPRECATED;
+ ANativeWindow::cancelBuffer_DEPRECATED = CancelBuffer_DEPRECATED;
+ ANativeWindow::lockBuffer_DEPRECATED = LockBuffer_DEPRECATED;
+ ANativeWindow::queueBuffer_DEPRECATED = QueueBuffer_DEPRECATED;
+}
+
+void NativeWindow::SetVisible(bool visible) { surface_->SetVisible(visible); }
+
+void NativeWindow::SetZOrder(int z_order) { surface_->SetZOrder(z_order); }
+
+void NativeWindow::PostEarly() {
+ ATRACE_NAME("NativeWindow::PostEarly");
+ ALOGI_IF(TRACE, "NativeWindow::PostEarly");
+
+ std::lock_guard<std::mutex> autolock(lock_);
+
+ if (!next_buffer_already_posted_) {
+ next_buffer_already_posted_ = true;
+
+ if (!next_post_buffer_.get()) {
+ next_post_buffer_ = buffer_queue_.Dequeue();
+ }
+ ATRACE_ASYNC_BEGIN("BufferPost", next_post_buffer_->buffer()->id());
+ Post(next_post_buffer_, -1);
+ }
+}
+
+void NativeWindow::Post(sp<NativeBufferProducer> buffer, int fence_fd) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+ ALOGI_IF(TRACE, "NativeWindow::Post: buffer_id=%d, fence_fd=%d",
+ buffer->buffer()->id(), fence_fd);
+ ALOGW_IF(!surface_->visible(),
+ "NativeWindow::Post: Posting buffer on invisible surface!!!");
+ buffer->Post(fence_fd, 0);
+}
+
+int NativeWindow::SetSwapInterval(ANativeWindow* window, int interval) {
+ ALOGI_IF(TRACE, "SetSwapInterval: window=%p interval=%d", window, interval);
+ return 0;
+}
+
+int NativeWindow::DequeueBuffer(ANativeWindow* window,
+ ANativeWindowBuffer** buffer, int* fence_fd) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+
+ NativeWindow* self = getSelf(window);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ if (!self->next_post_buffer_.get()) {
+ self->next_post_buffer_ = self->buffer_queue_.Dequeue();
+ }
+ ATRACE_ASYNC_BEGIN("BufferDraw", self->next_post_buffer_->buffer()->id());
+ *fence_fd = self->next_post_buffer_->ClaimReleaseFence().Release();
+ *buffer = self->next_post_buffer_.get();
+
+ ALOGI_IF(TRACE, "NativeWindow::DequeueBuffer: fence_fd=%d", *fence_fd);
+ return 0;
+}
+
+int NativeWindow::QueueBuffer(ANativeWindow* window,
+ ANativeWindowBuffer* buffer, int fence_fd) {
+ ATRACE_NAME("NativeWindow::QueueBuffer");
+ ALOGI_IF(TRACE, "NativeWindow::QueueBuffer: fence_fd=%d", fence_fd);
+
+ NativeWindow* self = getSelf(window);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ NativeBufferProducer* native_buffer =
+ static_cast<NativeBufferProducer*>(buffer);
+ ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
+ bool do_post = true;
+ if (self->next_buffer_already_posted_) {
+ // Check that this is the buffer we expect. If a different buffer shows up
+ // in production, fall through and let it post on top of the previous one.
+ DCHECK(native_buffer == self->next_post_buffer_.get());
+ if (native_buffer == self->next_post_buffer_.get()) {
+ do_post = false;
+ if (fence_fd >= 0)
+ close(fence_fd);
+ }
+ }
+ if (do_post) {
+ ATRACE_ASYNC_BEGIN("BufferPost", native_buffer->buffer()->id());
+ self->Post(native_buffer, fence_fd);
+ }
+ self->next_buffer_already_posted_ = false;
+ self->next_post_buffer_ = nullptr;
+
+ return NO_ERROR;
+}
+
+int NativeWindow::CancelBuffer(ANativeWindow* window,
+ ANativeWindowBuffer* buffer, int fence_fd) {
+ ATRACE_NAME("NativeWindow::CancelBuffer");
+ ALOGI_IF(TRACE, "NativeWindow::CancelBuffer: fence_fd: %d", fence_fd);
+
+ NativeWindow* self = getSelf(window);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ NativeBufferProducer* native_buffer =
+ static_cast<NativeBufferProducer*>(buffer);
+ ATRACE_ASYNC_END("BufferDraw", native_buffer->buffer()->id());
+ ATRACE_INT("CancelBuffer", native_buffer->buffer()->id());
+ bool do_enqueue = true;
+ if (self->next_buffer_already_posted_) {
+ // Check that this is the buffer we expect. If a different buffer shows up
+ // in production, fall through and return it to the buffer queue.
+ DCHECK(native_buffer == self->next_post_buffer_.get());
+ if (native_buffer == self->next_post_buffer_.get()) {
+ do_enqueue = false;
+ }
+ }
+ if (do_enqueue) {
+ self->buffer_queue_.Enqueue(native_buffer);
+ }
+ if (fence_fd >= 0)
+ close(fence_fd);
+ self->next_buffer_already_posted_ = false;
+ self->next_post_buffer_ = nullptr;
+
+ return NO_ERROR;
+}
+
+int NativeWindow::Query(const ANativeWindow* window, int what, int* value) {
+ NativeWindow* self = getSelf(const_cast<ANativeWindow*>(window));
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ switch (what) {
+ case NATIVE_WINDOW_WIDTH:
+ *value = self->surface_->width();
+ return NO_ERROR;
+ case NATIVE_WINDOW_HEIGHT:
+ *value = self->surface_->height();
+ return NO_ERROR;
+ case NATIVE_WINDOW_FORMAT:
+ *value = self->surface_->format();
+ return NO_ERROR;
+ case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
+ *value = 1;
+ return NO_ERROR;
+ case NATIVE_WINDOW_CONCRETE_TYPE:
+ *value = NATIVE_WINDOW_SURFACE;
+ return NO_ERROR;
+ case NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER:
+ *value = 1;
+ return NO_ERROR;
+ case NATIVE_WINDOW_DEFAULT_WIDTH:
+ *value = self->surface_->width();
+ return NO_ERROR;
+ case NATIVE_WINDOW_DEFAULT_HEIGHT:
+ *value = self->surface_->height();
+ return NO_ERROR;
+ case NATIVE_WINDOW_TRANSFORM_HINT:
+ *value = 0;
+ return NO_ERROR;
+ }
+
+ *value = 0;
+ return BAD_VALUE;
+}
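+
+// Query() is reached through the ANativeWindow function pointer; a caller
+// sketch using only the standard ANativeWindow contract:
+//
+//   int width = 0;
+//   window->query(window, NATIVE_WINDOW_DEFAULT_WIDTH, &width);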
+
+int NativeWindow::Perform(ANativeWindow* window, int operation, ...) {
+ NativeWindow* self = getSelf(window);
+ std::lock_guard<std::mutex> autolock(self->lock_);
+
+ va_list args;
+ va_start(args, operation);
+
+ // TODO(eieio): The following operations are not used at this time. They are
+ // included here to help document which operations may be useful and what
+ // parameters they take.
+ switch (operation) {
+ case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: {
+ int w = va_arg(args, int);
+ int h = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS: w=%d h=%d", w, h);
+ return NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_BUFFERS_FORMAT: {
+ int format = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_FORMAT: format=%d", format);
+ return NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: {
+ int transform = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_TRANSFORM: transform=%d",
+ transform);
+ return NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_SET_USAGE: {
+ int usage = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_USAGE: usage=%d", usage);
+ return NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_CONNECT:
+ case NATIVE_WINDOW_DISCONNECT:
+ case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
+ case NATIVE_WINDOW_API_CONNECT:
+ case NATIVE_WINDOW_API_DISCONNECT:
+ // TODO(eieio): Implement these operations.
+ return NO_ERROR;
+
+ case NATIVE_WINDOW_SET_BUFFER_COUNT: {
+ int buffer_count = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFER_COUNT: bufferCount=%d",
+ buffer_count);
+ return NO_ERROR;
+ }
+ case NATIVE_WINDOW_SET_BUFFERS_DATASPACE: {
+ android_dataspace_t data_space =
+ static_cast<android_dataspace_t>(va_arg(args, int));
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_BUFFERS_DATASPACE: dataSpace=%d",
+ data_space);
+ return NO_ERROR;
+ }
+ case NATIVE_WINDOW_SET_SCALING_MODE: {
+ int mode = va_arg(args, int);
+ ALOGD_IF(TRACE, "NATIVE_WINDOW_SET_SCALING_MODE: mode=%d", mode);
+ return NO_ERROR;
+ }
+
+ case NATIVE_WINDOW_LOCK:
+ case NATIVE_WINDOW_UNLOCK_AND_POST:
+ case NATIVE_WINDOW_SET_CROP:
+ case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
+ return INVALID_OPERATION;
+ }
+
+ return NAME_NOT_FOUND;
+}
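+
+// Perform() backs the static inline helpers in <system/window.h>; for
+// example, the following call (a sketch) lands in the
+// NATIVE_WINDOW_SET_BUFFERS_FORMAT case above:
+//
+//   native_window_set_buffers_format(window, HAL_PIXEL_FORMAT_RGBA_8888);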
+
+int NativeWindow::DequeueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer** buffer) {
+ int fence_fd = -1;
+ int ret = DequeueBuffer(window, buffer, &fence_fd);
+
+ // The deprecated interface has no way to return the fence to the caller, so
+ // close it here. Ideally this would sync_wait() on the fence first.
+ if (ret == NO_ERROR && fence_fd != -1)
+ close(fence_fd);
+
+ return ret;
+}
+
+int NativeWindow::CancelBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer) {
+ return CancelBuffer(window, buffer, -1);
+}
+
+int NativeWindow::QueueBuffer_DEPRECATED(ANativeWindow* window,
+ ANativeWindowBuffer* buffer) {
+ return QueueBuffer(window, buffer, -1);
+}
+
+int NativeWindow::LockBuffer_DEPRECATED(ANativeWindow* /*window*/,
+ ANativeWindowBuffer* /*buffer*/) {
+ return NO_ERROR;
+}
+
+} // namespace dvr
+} // namespace android
+
+static EGLNativeWindowType CreateDisplaySurface(int* display_width,
+ int* display_height, int format,
+ int usage, int flags) {
+ auto client = android::dvr::DisplayClient::Create();
+ if (!client) {
+ ALOGE("Failed to create display client!");
+ return nullptr;
+ }
+
+ // TODO(eieio,jbates): Consider passing flags and other parameters to get
+ // metrics based on specific surface requirements.
+ android::dvr::SystemDisplayMetrics metrics;
+ const int ret = client->GetDisplayMetrics(&metrics);
+ if (ret < 0) {
+ ALOGE("Failed to get display metrics: %s", strerror(-ret));
+ return nullptr;
+ }
+
+ int width, height;
+
+ if (flags & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION) {
+ width = metrics.display_native_width;
+ height = metrics.display_native_height;
+ } else {
+ width = metrics.distorted_width;
+ height = metrics.distorted_height;
+ }
+
+ std::shared_ptr<android::dvr::DisplaySurfaceClient> surface =
+ client->CreateDisplaySurface(width, height, format, usage, flags);
+ if (!surface) {
+ ALOGE("Failed to create display surface!");
+ return nullptr;
+ }
+
+ if (display_width)
+ *display_width = metrics.display_native_width;
+ if (display_height)
+ *display_height = metrics.display_native_height;
+
+ // Set the surface visible by default.
+ // TODO(eieio,jbates): Move this out of here and set visibility closer to the
+ // application, to handle apps that create surfaces for later use or that
+ // should not be visible yet.
+ surface->SetVisible(true);
+
+ return new android::dvr::NativeWindow(surface);
+}
+
+std::shared_ptr<android::dvr::DisplaySurfaceClient> CreateDisplaySurfaceClient(
+ struct DvrSurfaceParameter* parameters,
+ /*out*/ android::dvr::SystemDisplayMetrics* metrics);
+
+extern "C" EGLNativeWindowType dvrCreateDisplaySurfaceExtended(
+ struct DvrSurfaceParameter* parameters) {
+ android::dvr::SystemDisplayMetrics metrics;
+ auto surface = CreateDisplaySurfaceClient(parameters, &metrics);
+ if (!surface) {
+ ALOGE("Failed to create display surface client");
+ return nullptr;
+ }
+ return new android::dvr::NativeWindow(surface);
+}
+
+extern "C" EGLNativeWindowType dvrCreateDisplaySurface() {
+ return CreateDisplaySurface(NULL, NULL, kDefaultDisplaySurfaceFormat,
+ kDefaultDisplaySurfaceUsage,
+ kUnwarpedDisplaySurfaceFlags);
+}
+
+extern "C" EGLNativeWindowType dvrCreateWarpedDisplaySurface(
+ int* display_width, int* display_height) {
+ return CreateDisplaySurface(
+ display_width, display_height, kDefaultDisplaySurfaceFormat,
+ kDefaultDisplaySurfaceUsage, kWarpedDisplaySurfaceFlags);
+}
+
+extern "C" void dvrDisplaySurfaceSetVisible(EGLNativeWindowType window,
+ int visible) {
+ auto native_window = reinterpret_cast<android::dvr::NativeWindow*>(window);
+ native_window->SetVisible(visible);
+}
+
+extern "C" void dvrDisplaySurfaceSetZOrder(EGLNativeWindowType window,
+ int z_order) {
+ auto native_window = reinterpret_cast<android::dvr::NativeWindow*>(window);
+ native_window->SetZOrder(z_order);
+}
+
+extern "C" void dvrDisplayPostEarly(EGLNativeWindowType window) {
+ auto native_window = reinterpret_cast<android::dvr::NativeWindow*>(window);
+ native_window->PostEarly();
+}
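+
+// End-to-end usage sketch for the C entry points above (error handling
+// elided):
+//
+//   int w = 0, h = 0;
+//   EGLNativeWindowType win = dvrCreateWarpedDisplaySurface(&w, &h);
+//   dvrDisplaySurfaceSetZOrder(win, 1);
+//   dvrDisplaySurfaceSetVisible(win, 1);
+//   // ... create an EGL surface from |win| and render ...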
diff --git a/libs/vr/libdisplay/native_window_test.cpp b/libs/vr/libdisplay/native_window_test.cpp
new file mode 100644
index 0000000..2f3bc33
--- /dev/null
+++ b/libs/vr/libdisplay/native_window_test.cpp
@@ -0,0 +1,43 @@
+#include <iostream>
+#include <memory>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <dvr/graphics.h>
+#include <private/dvr/display_client.h>
+
+#include <cpp_free_mock/cpp_free_mock.h>
+
+// Checks that the device's vsync period is queried correctly when a display
+// surface is created.
+TEST(CreateDisplaySurface, QueryVSyncPeriod) {
+ using ::testing::_;
+
+ const uint64_t kExpectedVSync = 123456;
+
+ // Only the vsync period matters for this test.
+ android::dvr::DisplayMetrics metrics;
+ metrics.vsync_period_ns = kExpectedVSync;
+
+ uint64_t outPeriod = 0;
+
+ DvrSurfaceParameter display_params[] = {
+ DVR_SURFACE_PARAMETER_IN(WIDTH, 256),
+ DVR_SURFACE_PARAMETER_IN(HEIGHT, 256),
+ DVR_SURFACE_PARAMETER_OUT(VSYNC_PERIOD, &outPeriod),
+ DVR_SURFACE_PARAMETER_LIST_END,
+ };
+
+ // Inject the mock into the target method.
+ auto mocked_function =
+ MOCKER(&android::dvr::DisplayClient::GetDisplayMetrics);
+
+ // Instruct the mock to return our custom metrics.
+ EXPECT_CALL(*mocked_function, MOCK_FUNCTION(_, _))
+ .WillOnce(::testing::DoAll(::testing::SetArgPointee<1>(metrics),
+ ::testing::Return(0)));
+
+ ASSERT_NE(nullptr, dvrCreateDisplaySurfaceExtended(display_params));
+
+ EXPECT_EQ(kExpectedVSync, outPeriod);
+}
diff --git a/libs/vr/libdisplay/screenshot_client.cpp b/libs/vr/libdisplay/screenshot_client.cpp
new file mode 100644
index 0000000..78f5e0f
--- /dev/null
+++ b/libs/vr/libdisplay/screenshot_client.cpp
@@ -0,0 +1,66 @@
+#include "include/private/dvr/screenshot_client.h"
+
+#include <cutils/log.h>
+
+#include <mutex>
+
+#include <pdx/default_transport/client_channel_factory.h>
+#include <private/dvr/display_rpc.h>
+
+using android::pdx::Transaction;
+using android::pdx::rpc::ClientPayload;
+using android::pdx::rpc::MessageBuffer;
+using android::pdx::rpc::ReplyBuffer;
+
+namespace android {
+namespace dvr {
+
+namespace {
+// Maximum number of pixels supported for screenshot capture. If the target
+// buffer contains more pixels than this, an error is reported.
+constexpr int kMaxScreenshotPixels = 6000 * 4000;
+} // namespace
+
+int ScreenshotClient::Take(std::vector<uint8_t>* out_image, int index,
+ int* return_width, int* return_height) {
+ if (format_ != HAL_PIXEL_FORMAT_RGB_888) {
+ ALOGE("ScreenshotClient::Take: Unsupported layout format: format=%d",
+ format_);
+ return -ENOSYS;
+ }
+
+ // TODO(eieio): Find a cleaner way to ensure enough capacity for the send
+ // and receive buffers. This assumes thread-local buffers that maintain
+ // capacity across calls within the same thread.
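+ // RGB888 is 3 bytes per pixel, hence the factor of 3 below.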
+ MessageBuffer<ReplyBuffer>::Reserve(kMaxScreenshotPixels * 3);
+ auto status = InvokeRemoteMethod<DisplayScreenshotRPC::TakeScreenshot>(index);
+ if (!status) {
+ ALOGE("ScreenshotClient::Take: Failed to take screenshot: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ *return_width = status.get().width;
+ *return_height = status.get().height;
+ *out_image = std::move(status.take().buffer);
+ return 0;
+}
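+
+// Caller sketch (assuming the usual pdx ClientBase-provided Create() factory
+// and that |index| selects a display surface):
+//
+//   auto client = ScreenshotClient::Create();
+//   std::vector<uint8_t> image;
+//   int width = 0, height = 0;
+//   if (client->Take(&image, /*index=*/1, &width, &height) == 0) {
+//     // image now holds width * height RGB888 pixels.
+//   }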
+
+ScreenshotClient::ScreenshotClient()
+ : BASE(pdx::default_transport::ClientChannelFactory::Create(
+ DisplayScreenshotRPC::kClientPath)) {
+ auto status = InvokeRemoteMethod<DisplayScreenshotRPC::GetFormat>();
+ if (!status) {
+ ALOGE(
+ "ScreenshotClient::ScreenshotClient: Failed to retrieve screenshot "
+ "layout: %s",
+ status.GetErrorMessage().c_str());
+
+ Close(status.error());
+ } else {
+ format_ = status.get();
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/system/CPPLINT.cfg b/libs/vr/libdisplay/system/CPPLINT.cfg
new file mode 100644
index 0000000..2f8a3c0
--- /dev/null
+++ b/libs/vr/libdisplay/system/CPPLINT.cfg
@@ -0,0 +1 @@
+filter=-build/header_guard
diff --git a/libs/vr/libdisplay/tests/graphics_app_tests.cpp b/libs/vr/libdisplay/tests/graphics_app_tests.cpp
new file mode 100644
index 0000000..7ea3952
--- /dev/null
+++ b/libs/vr/libdisplay/tests/graphics_app_tests.cpp
@@ -0,0 +1,177 @@
+#include <dvr/graphics.h>
+#include <gtest/gtest.h>
+
+TEST(GraphicsAppTests, CreateWarpedDisplaySurfaceParams) {
+ int width = 0, height = 0;
+ EGLNativeWindowType window = dvrCreateWarpedDisplaySurface(&width, &height);
+ EXPECT_GT(width, 0);
+ EXPECT_GT(height, 0);
+ EXPECT_NE(window, nullptr);
+}
+
+TEST(GraphicsAppTests, CreateDisplaySurface) {
+ EGLNativeWindowType window = dvrCreateDisplaySurface();
+ EXPECT_NE(window, nullptr);
+}
+
+TEST(GraphicsAppTests, CreateDisplaySurfaceExtended) {
+ int display_width = 0, display_height = 0;
+ int surface_width = 0, surface_height = 0;
+ float inter_lens_meters = 0.0f;
+ float left_fov[4] = {0.0f};
+ float right_fov[4] = {0.0f};
+ int disable_warp = 0;
+ DvrSurfaceParameter surface_params[] = {
+ DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
+ DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
+ DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
+ DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
+ DVR_SURFACE_PARAMETER_LIST_END,
+ };
+
+ EGLNativeWindowType window = dvrCreateDisplaySurfaceExtended(surface_params);
+ EXPECT_NE(window, nullptr);
+ EXPECT_GT(display_width, 0);
+ EXPECT_GT(display_height, 0);
+ EXPECT_GT(surface_width, 0);
+ EXPECT_GT(surface_height, 0);
+ EXPECT_GT(inter_lens_meters, 0);
+ EXPECT_GT(left_fov[0], 0);
+ EXPECT_GT(left_fov[1], 0);
+ EXPECT_GT(left_fov[2], 0);
+ EXPECT_GT(left_fov[3], 0);
+ EXPECT_GT(right_fov[0], 0);
+ EXPECT_GT(right_fov[1], 0);
+ EXPECT_GT(right_fov[2], 0);
+ EXPECT_GT(right_fov[3], 0);
+}
+
+TEST(GraphicsAppTests, GetNativeDisplayDimensions) {
+ int width, height;
+ dvrGetNativeDisplayDimensions(&width, &height);
+ EXPECT_GT(width, 0);
+ EXPECT_GT(height, 0);
+}
+
+TEST(GraphicsAppTests, GetDisplaySurfaceInfo) {
+ int ret, width, height, format;
+ EGLNativeWindowType window = dvrCreateDisplaySurface();
+ ASSERT_NE(window, nullptr);
+ ret = dvrGetDisplaySurfaceInfo(window, &width, &height, &format);
+ ASSERT_EQ(0, ret);
+ ASSERT_GT(width, 0);
+ ASSERT_GT(height, 0);
+ ASSERT_NE(0, format);
+}
+
+// TODO(jpoichet): How can we verify that this worked?
+TEST(GraphicsAppTests, GraphicsSurfaceSetVisible) {
+ DvrSurfaceParameter surface_params[] = {DVR_SURFACE_PARAMETER_LIST_END};
+ DvrGraphicsContext* context = nullptr;
+ int result = dvrGraphicsContextCreate(surface_params, &context);
+ ASSERT_GE(result, 0);
+ ASSERT_NE(context, nullptr);
+ dvrGraphicsSurfaceSetVisible(context, 0);
+ dvrGraphicsSurfaceSetVisible(context, 1);
+ dvrGraphicsSurfaceSetVisible(context, 2);
+}
+
+// TODO(jpoichet): How can we verify that this worked?
+TEST(GraphicsAppTests, GraphicsSurfaceSetZOrder) {
+ DvrSurfaceParameter surface_params[] = {DVR_SURFACE_PARAMETER_LIST_END};
+ DvrGraphicsContext* context = nullptr;
+ int result = dvrGraphicsContextCreate(surface_params, &context);
+ ASSERT_GE(result, 0);
+ ASSERT_NE(context, nullptr);
+ dvrGraphicsSurfaceSetZOrder(context, -1);
+ dvrGraphicsSurfaceSetZOrder(context, 0);
+ dvrGraphicsSurfaceSetZOrder(context, 1);
+ dvrGraphicsSurfaceSetZOrder(context, 2);
+}
+
+TEST(GraphicsAppTests, GraphicsContext) {
+ DvrGraphicsContext* context = nullptr;
+ int display_width = 0, display_height = 0;
+ int surface_width = 0, surface_height = 0;
+ float inter_lens_meters = 0.0f;
+ float left_fov[4] = {0.0f};
+ float right_fov[4] = {0.0f};
+ uint64_t vsync_period = 0;
+ int disable_warp = 0;
+ DvrSurfaceParameter surface_params[] = {
+ DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
+ DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
+ DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
+ DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
+ DVR_SURFACE_PARAMETER_OUT(VSYNC_PERIOD, &vsync_period),
+ DVR_SURFACE_PARAMETER_LIST_END,
+ };
+ dvrGraphicsContextCreate(surface_params, &context);
+ EXPECT_NE(nullptr, context);
+
+ DvrFrameSchedule schedule;
+ int wait_result = dvrGraphicsWaitNextFrame(context, 0, &schedule);
+ EXPECT_EQ(wait_result, 0);
+ EXPECT_GE(schedule.vsync_count, 0u);
+
+ dvrBeginRenderFrame(context);
+
+ // Check range of vsync period from 70fps to 100fps.
+ // TODO(jbates) Once we have stable hardware, clamp this range down further.
+ EXPECT_LT(vsync_period, 1000000000ul / 70ul);
+ EXPECT_GT(vsync_period, 1000000000ul / 100ul);
+
+ dvrPresent(context);
+ dvrGraphicsContextDestroy(context);
+}
+
+TEST(GraphicsAppTests, CustomSurfaceSize) {
+ DvrGraphicsContext* context = nullptr;
+ int display_width = 0, display_height = 0;
+ int surface_width = 0, surface_height = 0;
+ float inter_lens_meters = 0.0f;
+ float left_fov[4] = {0.0f};
+ float right_fov[4] = {0.0f};
+ int disable_warp = 0;
+ int req_width = 256, req_height = 128;
+ DvrSurfaceParameter surface_params[] = {
+ DVR_SURFACE_PARAMETER_IN(WIDTH, req_width),
+ DVR_SURFACE_PARAMETER_IN(HEIGHT, req_height),
+ DVR_SURFACE_PARAMETER_IN(DISABLE_DISTORTION, disable_warp),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_WIDTH, &display_width),
+ DVR_SURFACE_PARAMETER_OUT(DISPLAY_HEIGHT, &display_height),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_WIDTH, &surface_width),
+ DVR_SURFACE_PARAMETER_OUT(SURFACE_HEIGHT, &surface_height),
+ DVR_SURFACE_PARAMETER_OUT(INTER_LENS_METERS, &inter_lens_meters),
+ DVR_SURFACE_PARAMETER_OUT(LEFT_FOV_LRBT, left_fov),
+ DVR_SURFACE_PARAMETER_OUT(RIGHT_FOV_LRBT, right_fov),
+ DVR_SURFACE_PARAMETER_LIST_END,
+ };
+ dvrGraphicsContextCreate(surface_params, &context);
+ EXPECT_NE(nullptr, context);
+
+ EXPECT_EQ(req_width, surface_width);
+ EXPECT_EQ(req_height, surface_height);
+ dvrGraphicsContextDestroy(context);
+}
+
+TEST(GraphicsAppTests, CreateVideoMeshSurface) {
+ DvrSurfaceParameter surface_params[] = {DVR_SURFACE_PARAMETER_LIST_END};
+ DvrGraphicsContext* context = nullptr;
+ int result = dvrGraphicsContextCreate(surface_params, &context);
+ EXPECT_NE(nullptr, context);
+ EXPECT_EQ(result, 0);
+
+ DvrVideoMeshSurface* surface = dvrGraphicsVideoMeshSurfaceCreate(context);
+ EXPECT_NE(nullptr, surface);
+
+ dvrGraphicsVideoMeshSurfaceDestroy(surface);
+}
diff --git a/libs/vr/libdisplay/video_mesh_surface_client.cpp b/libs/vr/libdisplay/video_mesh_surface_client.cpp
new file mode 100644
index 0000000..04cc194
--- /dev/null
+++ b/libs/vr/libdisplay/video_mesh_surface_client.cpp
@@ -0,0 +1,61 @@
+#include "include/private/dvr/video_mesh_surface_client.h"
+
+using android::pdx::LocalChannelHandle;
+
+namespace android {
+namespace dvr {
+
+/* static */
+std::unique_ptr<VideoMeshSurfaceClient> VideoMeshSurfaceClient::Import(
+ LocalChannelHandle handle) {
+ return VideoMeshSurfaceClient::Create(std::move(handle));
+}
+
+VideoMeshSurfaceClient::VideoMeshSurfaceClient(LocalChannelHandle handle)
+ : BASE(std::move(handle), SurfaceTypeEnum::VideoMesh),
+ mapped_metadata_buffer_(nullptr) {
+ // TODO(jwcai) import more data if needed.
+}
+
+std::shared_ptr<ProducerQueue> VideoMeshSurfaceClient::GetProducerQueue() {
+ if (producer_queue_ == nullptr) {
+ // Create the producer queue through DisplayRPC.
+ auto status =
+ InvokeRemoteMethod<DisplayRPC::VideoMeshSurfaceCreateProducerQueue>();
+ if (!status) {
+ ALOGE(
+ "VideoMeshSurfaceClient::GetProducerQueue: failed to create producer "
+ "queue: %s",
+ status.GetErrorMessage().c_str());
+ return nullptr;
+ }
+
+ producer_queue_ =
+ ProducerQueue::Import<VideoMeshSurfaceBufferMetadata>(status.take());
+ }
+ return producer_queue_;
+}
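+
+// Intended flow, sketched (ProducerQueue specifics come from
+// libbufferhubqueue and are assumed here):
+//
+//   auto surface = VideoMeshSurfaceClient::Import(std::move(channel_handle));
+//   auto queue = surface->GetProducerQueue();  // created lazily over RPC
+//   // Buffers produced through |queue| carry VideoMeshSurfaceBufferMetadata.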
+
+volatile VideoMeshSurfaceMetadata*
+VideoMeshSurfaceClient::GetMetadataBufferPtr() {
+ if (!mapped_metadata_buffer_) {
+ if (auto buffer_producer = GetMetadataBuffer()) {
+ void* addr = nullptr;
+ const int ret = buffer_producer->GetBlobReadWritePointer(
+ sizeof(VideoMeshSurfaceMetadata), &addr);
+ if (ret < 0) {
+ ALOGE(
+ "VideoMeshSurfaceClient::GetMetadataBufferPtr: Failed to map "
+ "surface metadata: %s",
+ strerror(-ret));
+ return nullptr;
+ }
+ mapped_metadata_buffer_ = static_cast<VideoMeshSurfaceMetadata*>(addr);
+ }
+ }
+
+ return mapped_metadata_buffer_;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/vsync_client.cpp b/libs/vr/libdisplay/vsync_client.cpp
new file mode 100644
index 0000000..c4cad50
--- /dev/null
+++ b/libs/vr/libdisplay/vsync_client.cpp
@@ -0,0 +1,72 @@
+#include "include/private/dvr/vsync_client.h"
+
+#include <cutils/log.h>
+
+#include <pdx/default_transport/client_channel_factory.h>
+#include <private/dvr/display_rpc.h>
+
+using android::pdx::Transaction;
+
+namespace android {
+namespace dvr {
+
+VSyncClient::VSyncClient(long timeout_ms)
+ : BASE(pdx::default_transport::ClientChannelFactory::Create(
+ DisplayVSyncRPC::kClientPath),
+ timeout_ms) {}
+
+VSyncClient::VSyncClient()
+ : BASE(pdx::default_transport::ClientChannelFactory::Create(
+ DisplayVSyncRPC::kClientPath)) {}
+
+int VSyncClient::Wait(int64_t* timestamp_ns) {
+ auto status = InvokeRemoteMethod<DisplayVSyncRPC::Wait>();
+ if (!status) {
+ ALOGE("VSyncClient::Wait: Failed to wait for vsync: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+ *timestamp_ns = status.get();
+ return 0;
+}
+
+int VSyncClient::GetFd() { return event_fd(); }
+
+int VSyncClient::GetLastTimestamp(int64_t* timestamp_ns) {
+ auto status = InvokeRemoteMethod<DisplayVSyncRPC::GetLastTimestamp>();
+ if (!status) {
+ ALOGE("VSyncClient::GetLastTimestamp: Failed to get vsync timestamp: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+ *timestamp_ns = status.get();
+ return 0;
+}
+
+int VSyncClient::GetSchedInfo(int64_t* vsync_period_ns, int64_t* timestamp_ns,
+ uint32_t* next_vsync_count) {
+ if (!vsync_period_ns || !timestamp_ns || !next_vsync_count)
+ return -EINVAL;
+
+ auto status = InvokeRemoteMethod<DisplayVSyncRPC::GetSchedInfo>();
+ if (!status) {
+ ALOGE("VSyncClient::GetSchedInfo: Failed to get scheduling info: %s",
+ status.GetErrorMessage().c_str());
+ return -status.error();
+ }
+
+ *vsync_period_ns = status.get().vsync_period_ns;
+ *timestamp_ns = status.get().timestamp_ns;
+ *next_vsync_count = status.get().next_vsync_count;
+ return 0;
+}
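+
+// Sketch: predicting the next vsync from one GetSchedInfo() call (assuming
+// timestamp_ns reports the most recent vsync):
+//
+//   int64_t period_ns = 0, timestamp_ns = 0;
+//   uint32_t next_count = 0;
+//   if (client->GetSchedInfo(&period_ns, &timestamp_ns, &next_count) == 0) {
+//     const int64_t next_vsync_ns = timestamp_ns + period_ns;
+//   }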
+
+int VSyncClient::Acknowledge() {
+ auto status = InvokeRemoteMethod<DisplayVSyncRPC::Acknowledge>();
+ ALOGE_IF(!status, "VSyncClient::Acknowledge: Failed to acknowledge vsync: %s",
+ status.GetErrorMessage().c_str());
+ return ReturnStatusOrError(status);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libdisplay/vsync_client_api.cpp b/libs/vr/libdisplay/vsync_client_api.cpp
new file mode 100644
index 0000000..56103ed
--- /dev/null
+++ b/libs/vr/libdisplay/vsync_client_api.cpp
@@ -0,0 +1,34 @@
+#include "include/private/dvr/vsync_client_api.h"
+
+#include <private/dvr/vsync_client.h>
+
+extern "C" {
+
+dvr_vsync_client* dvr_vsync_client_create() {
+ auto client = android::dvr::VSyncClient::Create();
+ return static_cast<dvr_vsync_client*>(client.release());
+}
+
+void dvr_vsync_client_destroy(dvr_vsync_client* client) {
+ delete static_cast<android::dvr::VSyncClient*>(client);
+}
+
+int dvr_vsync_client_wait(dvr_vsync_client* client, int64_t* timestamp_ns) {
+ return static_cast<android::dvr::VSyncClient*>(client)->Wait(timestamp_ns);
+}
+
+int dvr_vsync_client_get_fd(dvr_vsync_client* client) {
+ return static_cast<android::dvr::VSyncClient*>(client)->GetFd();
+}
+
+int dvr_vsync_client_acknowledge(dvr_vsync_client* client) {
+ return static_cast<android::dvr::VSyncClient*>(client)->Acknowledge();
+}
+
+int dvr_vsync_client_get_last_timestamp(dvr_vsync_client* client,
+ int64_t* timestamp_ns) {
+ return static_cast<android::dvr::VSyncClient*>(client)->GetLastTimestamp(
+ timestamp_ns);
+}
+
+} // extern "C"
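+
+// Usage sketch for the C API above:
+//
+//   dvr_vsync_client* client = dvr_vsync_client_create();
+//   int64_t timestamp_ns = 0;
+//   if (client && dvr_vsync_client_wait(client, &timestamp_ns) == 0) {
+//     // timestamp_ns holds the time of the vsync that just occurred.
+//   }
+//   dvr_vsync_client_destroy(client);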