Add libvrflinger for use in SurfaceFlinger
A separate CL uses this code from SurfaceFlinger.
Bug: None
Test: Manually ran modified SurfaceFlinger
Change-Id: I34588df1365588c0a0265e1e2325e3dd5516206a
diff --git a/libs/vr/libvrflinger/Android.mk b/libs/vr/libvrflinger/Android.mk
new file mode 100644
index 0000000..6b5e7cc
--- /dev/null
+++ b/libs/vr/libvrflinger/Android.mk
@@ -0,0 +1,88 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+sourceFiles := \
+    acquired_buffer.cpp \
+    compositor.cpp \
+    debug_hud_data.cpp \
+    debug_hud_view.cpp \
+    display_manager_service.cpp \
+    display_service.cpp \
+    display_surface.cpp \
+    epoll_event_dispatcher.cpp \
+    hardware_composer.cpp \
+    screenshot_service.cpp \
+    surface_channel.cpp \
+    video_compositor.cpp \
+    video_mesh_surface.cpp \
+    vr_flinger.cpp \
+    vsync_service.cpp
+
+includeFiles := $(LOCAL_PATH)/include
+
+staticLibraries := \
+    libsurfaceflingerincludes \
+    libhwcomposer-command-buffer \
+    libbufferhub \
+    libbufferhubqueue \
+    libeds \
+    libdisplay \
+    libdvrcommon \
+    libdvrgraphics \
+    libperformance \
+    libsensor \
+    libpdx_default_transport \
+
+sharedLibraries := \
+    android.dvr.composer@1.0 \
+    android.hardware.graphics.allocator@2.0 \
+    android.hardware.graphics.composer@2.1 \
+    libbinder \
+    libbase \
+    libcutils \
+    liblog \
+    libhardware \
+    libutils \
+    libEGL \
+    libGLESv1_CM \
+    libGLESv2 \
+    libvulkan \
+    libui \
+    libgui \
+    libsync \
+    libhidlbase \
+    libhidltransport \
+    libfmq \
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := $(sourceFiles)
+LOCAL_C_INCLUDES := $(includeFiles)
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(includeFiles)
+# Assign the common library lists up front; the QCOM conditional below appends
+# to LOCAL_SHARED_LIBRARIES, so a later ':=' assignment would clobber it.
+LOCAL_SHARED_LIBRARIES := $(sharedLibraries)
+LOCAL_WHOLE_STATIC_LIBRARIES := $(staticLibraries)
+
+LOCAL_CFLAGS += -DLOG_TAG=\"vr_flinger\"
+LOCAL_CFLAGS += -DTRACE=0
+LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_GRAPHICS
+LOCAL_CFLAGS += -DGL_GLEXT_PROTOTYPES -DEGL_EGLEXT_PROTOTYPES
+ifeq ($(TARGET_USES_QCOM_BSP), true)
+  LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
+  LOCAL_C_INCLUDES += hardware/qcom/display/libqdutils
+  LOCAL_SHARED_LIBRARIES += libqdutils
+endif
+LOCAL_MODULE := libvrflinger
+include $(BUILD_STATIC_LIBRARY)
diff --git a/libs/vr/libvrflinger/acquired_buffer.cpp b/libs/vr/libvrflinger/acquired_buffer.cpp
new file mode 100644
index 0000000..5a3aa7f
--- /dev/null
+++ b/libs/vr/libvrflinger/acquired_buffer.cpp
@@ -0,0 +1,103 @@
+#include "acquired_buffer.h"
+
+#include <errno.h>   // errno, ETIME
+#include <string.h>  // strerror
+
+#include <log/log.h>
+#include <sync/sync.h>
+
+using android::pdx::LocalHandle;
+
+namespace android {
+namespace dvr {
+
+AcquiredBuffer::AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+                               LocalHandle acquire_fence, uint64_t /*sequence*/)
+    : buffer_(buffer), acquire_fence_(std::move(acquire_fence)) {}
+
+AcquiredBuffer::AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+                               int* error) {
+  LocalHandle fence;
+  const int ret = buffer->Acquire(&fence);
+
+  if (error)
+    *error = ret;
+
+  if (ret < 0) {
+    ALOGW("AcquiredBuffer::AcquiredBuffer: Failed to acquire buffer: %s",
+          strerror(-ret));
+    buffer_ = nullptr;
+    // Default construct sets acquire_fence_ to empty.
+  } else {
+    buffer_ = buffer;
+    acquire_fence_ = std::move(fence);
+  }
+}
+
+AcquiredBuffer::AcquiredBuffer(AcquiredBuffer&& other)
+    : buffer_(std::move(other.buffer_)),
+      acquire_fence_(std::move(other.acquire_fence_)) {}
+
+AcquiredBuffer::~AcquiredBuffer() { Release(LocalHandle(kEmptyFence)); }  // Best-effort; release errors are ignored at destruction.
+
+AcquiredBuffer& AcquiredBuffer::operator=(AcquiredBuffer&& other) {
+  if (this != &other) {
+    Release(LocalHandle(kEmptyFence));
+
+    buffer_ = std::move(other.buffer_);
+    acquire_fence_ = std::move(other.acquire_fence_);
+  }
+  return *this;
+}
+
+bool AcquiredBuffer::IsAvailable() const {
+  if (IsEmpty())
+    return false;
+
+  // Only check the fence if the acquire fence is not empty.
+  if (acquire_fence_) {
+    const int ret = sync_wait(acquire_fence_.Get(), 0);
+    ALOGD_IF(TRACE || (ret < 0 && errno != ETIME),
+             "AcquiredBuffer::IsAvailable: acquire_fence_=%d sync_wait()=%d "
+             "errno=%d.",
+             acquire_fence_.Get(), ret, ret < 0 ? errno : 0);
+    if (ret == 0) {
+      // The fence is completed, so to avoid further calls to sync_wait we close
+      // it here.
+      acquire_fence_.Close();
+    }
+    return ret == 0;
+  } else {
+    return true;
+  }
+}
+
+LocalHandle AcquiredBuffer::ClaimAcquireFence() {
+  return std::move(acquire_fence_);
+}
+
+std::shared_ptr<BufferConsumer> AcquiredBuffer::ClaimBuffer() {
+  return std::move(buffer_);
+}
+
+int AcquiredBuffer::Release(LocalHandle release_fence) {
+  if (buffer_) {
+    // Close the release fence since we can't transfer it with an async release.
+    release_fence.Close();
+    const int ret = buffer_->ReleaseAsync();
+    if (ret < 0) {
+      ALOGE("AcquiredBuffer::Release: Failed to release buffer %d: %s",
+            buffer_->id(), strerror(-ret));
+      if (ret != -ESHUTDOWN)
+        return ret;
+    }
+
+    buffer_ = nullptr;
+    acquire_fence_.Close();
+  }
+
+  return 0;
+}
+
+}  // namespace dvr
+}  // namespace android
diff --git a/libs/vr/libvrflinger/acquired_buffer.h b/libs/vr/libvrflinger/acquired_buffer.h
new file mode 100644
index 0000000..050cd5f
--- /dev/null
+++ b/libs/vr/libvrflinger/acquired_buffer.h
@@ -0,0 +1,83 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
+
+#include <pdx/file_handle.h>
+#include <private/dvr/buffer_hub_client.h>
+
+#include <cstdint>
+#include <memory>
+
+namespace android {
+namespace dvr {
+
+// Manages the ACQUIRE/RELEASE ownership cycle of a BufferConsumer.
+class AcquiredBuffer {
+ public:
+  static constexpr int kEmptyFence = pdx::LocalHandle::kEmptyFileHandle;
+
+  AcquiredBuffer() : buffer_(nullptr), acquire_fence_(kEmptyFence) {}
+
+  // Constructs an AcquiredBuffer from a BufferConsumer pointer and an acquire
+  // fence. The BufferConsumer MUST be in the ACQUIRED state prior to calling
+  // this constructor; the constructor does not attempt to ACQUIRE the buffer
+  // itself.
+  AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer,
+                 pdx::LocalHandle acquire_fence, uint64_t sequence);
+
+  // Constructs an AcquiredBuffer from a BufferConsumer. The BufferConsumer MUST
+  // be in the POSTED state prior to calling this constructor, as this
+  // constructor attempts to ACQUIRE the buffer. If ACQUIRING the buffer fails
+  // this instance is left in the empty state. An optional error code is
+  // returned in |error|, which may be nullptr if not needed.
+  AcquiredBuffer(const std::shared_ptr<BufferConsumer>& buffer, int* error);
+
+  // Move constructor. Behaves similarly to the move assignment operator below.
+  AcquiredBuffer(AcquiredBuffer&& other);
+
+  ~AcquiredBuffer();
+
+  // Move assignment operator. Moves the BufferConsumer and acquire fence from
+  // |other| into this instance after RELEASING the current BufferConsumer and
+  // closing the acquire fence. After the move |other| is left in the empty
+  // state.
+  AcquiredBuffer& operator=(AcquiredBuffer&& other);
+
+  // Accessors for the underlying BufferConsumer and the acquire fence. Note
+  // that the sequence value passed at acquisition is not retained (see
+  // private/dvr/buffer_hub_client.h).
+  std::shared_ptr<BufferConsumer> buffer() const { return buffer_; }
+  int acquire_fence() const { return acquire_fence_.Get(); }
+
+  // When non-empty, returns true if the acquired fence was signaled (or if the
+  // fence is empty). Returns false when empty or if the fence is not signaled.
+  bool IsAvailable() const;
+
+  bool IsEmpty() const { return buffer_ == nullptr; }
+
+  // Returns the acquire fence, passing ownership to the caller.
+  pdx::LocalHandle ClaimAcquireFence();
+
+  // Returns the buffer, passing ownership to the caller. Caller is responsible
+  // for calling Release on the returned buffer.
+  std::shared_ptr<BufferConsumer> ClaimBuffer();
+
+  // Releases the BufferConsumer, passing the release fence in |release_fence|
+  // to the producer. On success, the BufferConsumer and acquire fence are set
+  // to empty state; if release fails, the BufferConsumer and acquire fence are
+  // left in place and a negative error code is returned.
+  int Release(pdx::LocalHandle release_fence);
+
+ private:
+  AcquiredBuffer(const AcquiredBuffer&) = delete;
+  void operator=(const AcquiredBuffer&) = delete;
+
+  std::shared_ptr<BufferConsumer> buffer_;
+  // Mutable so that the fence can be closed when it is determined to be
+  // signaled during IsAvailable().
+  mutable pdx::LocalHandle acquire_fence_;
+};
+
+}  // namespace dvr
+}  // namespace android
+
+#endif  // ANDROID_DVR_SERVICES_DISPLAYD_ACQUIRED_BUFFER_H_
diff --git a/libs/vr/libvrflinger/compositor.cpp b/libs/vr/libvrflinger/compositor.cpp
new file mode 100644
index 0000000..5a111d4
--- /dev/null
+++ b/libs/vr/libvrflinger/compositor.cpp
@@ -0,0 +1,873 @@
+#include "compositor.h"
+
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+
+#include <memory>
+
+#include <cutils/properties.h>
+
+#include <dvr/graphics.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/debug.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/dummy_native_window.h>
+#include <private/dvr/gl_fenced_flush.h>
+#include <private/dvr/graphics/blur.h>
+#include <private/dvr/graphics/gpu_profiler.h>
+#include <private/dvr/lucid_metrics.h>
+#include <private/dvr/native_buffer.h>
+#include <private/dvr/platform_defines.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "debug_hud_data.h"
+#include "debug_hud_view.h"
+#include "display_surface.h"
+
+#define BINNING_CONTROL_HINT_QCOM 0x8FB0
+
+// Accepted by the <hint> parameter of glHint:
+#define BINNING_QCOM 0x8FB1
+#define VISIBILITY_OPTIMIZED_BINNING_QCOM 0x8FB2
+#define RENDER_DIRECT_TO_FRAMEBUFFER_QCOM 0x8FB3
+
+#ifndef EGL_CONTEXT_MAJOR_VERSION
+#define EGL_CONTEXT_MAJOR_VERSION 0x3098
+#define EGL_CONTEXT_MINOR_VERSION 0x30FB
+#endif
+
+using android::pdx::LocalHandle;
+
+static const int kDistortionMeshResolution = 40;
+
+static std::shared_ptr<int64_t> eds_gpu_duration_ns =
+ std::make_shared<int64_t>(0);
+
+static constexpr char kDisableLensDistortionProp[] =
+ "persist.dreamos.disable_distort";
+
+static constexpr char kEnableEdsPoseSaveProp[] =
+ "persist.dreamos.save_eds_pose";
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// An implementation of ANativeWindowBuffer backed by a temporary IonBuffer.
+// Do not hold on to this kind of object, because the IonBuffer may become
+// invalid in other scopes.
+class TemporaryNativeBuffer
+ : public ANativeObjectBase<ANativeWindowBuffer, TemporaryNativeBuffer,
+ LightRefBase<TemporaryNativeBuffer>> {
+ public:
+ explicit TemporaryNativeBuffer(const IonBuffer* buffer) : BASE() {
+ ANativeWindowBuffer::width = buffer->width();
+ ANativeWindowBuffer::height = buffer->height();
+ ANativeWindowBuffer::stride = buffer->stride();
+ ANativeWindowBuffer::format = buffer->format();
+ ANativeWindowBuffer::usage = buffer->usage();
+ // TODO(eieio): Update NYC to support layer_count.
+ // ANativeWindowBuffer::layer_count = 1;
+ handle = buffer->handle();
+ }
+
+ private:
+ friend class android::LightRefBase<TemporaryNativeBuffer>;
+
+ TemporaryNativeBuffer(const TemporaryNativeBuffer&) = delete;
+ void operator=(TemporaryNativeBuffer&) = delete;
+};
+
+std::vector<uint8_t> ReadTextureRGBA(GLuint texture_id, int width, int height) {
+  std::vector<uint8_t> data(width * height * 4);  // RGBA8: 4 bytes per pixel.
+  GLuint fbo;
+  glGenFramebuffers(1, &fbo);  // Temporary FBO so glReadPixels can see the texture.
+  glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+                         texture_id, 0);
+  // Using default GL_PACK_ALIGNMENT of 4 for the 4 byte source data.
+  glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data.data());
+  glBindFramebuffer(GL_FRAMEBUFFER, 0);  // Restore the default framebuffer.
+  glDeleteFramebuffers(1, &fbo);
+  CHECK_GL();
+  return data;
+}
+
+} // namespace
+
+class Compositor::Texture {
+ public:
+ Texture(std::shared_ptr<BufferConsumer> consumer, EGLDisplay display,
+ int index);
+ ~Texture();
+
+ std::shared_ptr<BufferConsumer> consumer() const { return consumer_; }
+ GLuint texture_id() const { return texture_id_; }
+ vec2i size() const {
+ return vec2i(native_buffer_.get()->width, native_buffer_.get()->height);
+ }
+ int index() const { return index_; }
+
+ bool Initialize();
+
+ private:
+ Texture(const Texture&) = delete;
+ void operator=(const Texture&) = delete;
+
+ std::shared_ptr<BufferConsumer> consumer_;
+
+ android::sp<NativeBufferConsumer> native_buffer_;
+
+ EGLDisplay display_;
+ EGLImageKHR image_;
+ GLuint texture_id_;
+ int index_;
+};
+
+Compositor::Texture::Texture(std::shared_ptr<BufferConsumer> consumer,
+ EGLDisplay display, int index)
+ : consumer_(consumer),
+ display_(display),
+ image_(nullptr),
+ texture_id_(0),
+ index_(index) {}
+
+Compositor::Texture::~Texture() {
+ glDeleteTextures(1, &texture_id_);
+ eglDestroyImageKHR(display_, image_);
+}
+
+bool Compositor::Texture::Initialize() {
+ native_buffer_ = new NativeBufferConsumer(consumer_, index_);
+
+ CHECK_GL();
+ image_ = eglCreateImageKHR(
+ display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+ if (!image_) {
+ ALOGE("Failed to create EGLImage\n");
+ return false;
+ }
+
+ glGenTextures(1, &texture_id_);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ CHECK_GL();
+ return true;
+}
+
+Compositor::RenderTarget::RenderTarget()
+ : buffer_texture_id_(0),
+ buffer_framebuffer_id_(0),
+ buffer_image_(nullptr) {}
+
+Compositor::RenderTarget::~RenderTarget() { Destroy(); }
+
+void Compositor::RenderTarget::Destroy() {
+ glDeleteFramebuffers(1, &buffer_framebuffer_id_);
+ glDeleteTextures(1, &buffer_texture_id_);
+ eglDestroyImageKHR(eglGetDisplay(EGL_DEFAULT_DISPLAY), buffer_image_);
+ buffer_texture_id_ = 0;
+ buffer_framebuffer_id_ = 0;
+ buffer_image_ = nullptr;
+}
+
+void Compositor::RenderTarget::Initialize(int width, int height) {
+ LOG_ALWAYS_FATAL_IF(buffer_texture_id_ || buffer_framebuffer_id_ ||
+ buffer_image_);
+ constexpr int usage = GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER |
+ GRALLOC_USAGE_HW_RENDER |
+ GRALLOC_USAGE_QCOM_FRAMEBUFFER_COMPRESSION;
+ buffer_ = std::make_shared<IonBuffer>(width, height,
+ HAL_PIXEL_FORMAT_RGBA_8888, usage);
+
+ native_buffer_ = new NativeBuffer(buffer_);
+
+ buffer_image_ = eglCreateImageKHR(
+ eglGetDisplay(EGL_DEFAULT_DISPLAY), EGL_NO_CONTEXT,
+ EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+
+ glGenTextures(1, &buffer_texture_id_);
+ glBindTexture(GL_TEXTURE_2D, buffer_texture_id_);
+ CHECK_GL();
+
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, buffer_image_);
+ CHECK_GL();
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ // Generate a framebuffer.
+ glGenFramebuffers(1, &buffer_framebuffer_id_);
+ glBindFramebuffer(GL_FRAMEBUFFER, buffer_framebuffer_id_);
+ CHECK_GL();
+
+ // Attach the color buffer
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ buffer_texture_id_, 0);
+ CHECK_GL();
+ GLenum result = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+ CHECK_GL();
+ if (result != GL_FRAMEBUFFER_COMPLETE) {
+ ALOGE("Framebuffer incomplete: %d", result);
+ }
+
+ // Clear the render target to black once. In direct render mode we never draw
+ // the corner pixels.
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ glFlush();
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+ CHECK_GL();
+}
+
+void Compositor::RenderTarget::BindFramebuffer() {
+ glBindFramebuffer(GL_FRAMEBUFFER, buffer_framebuffer_id_);
+}
+
+void Compositor::RenderTarget::DiscardColorAttachment() {
+ GLenum attachment = GL_COLOR_ATTACHMENT0;
+ glDiscardFramebufferEXT(GL_FRAMEBUFFER, 1, &attachment);
+ CHECK_GL();
+}
+
+class Compositor::RenderPoseBufferObject {
+ public:
+  RenderPoseBufferObject(LocalHandle&& render_pose_buffer_fd) {
+    // Create new pose tracking buffer for this surface.
+    glGenBuffers(1, &render_pose_buffer_object_);
+    glBindBuffer(GL_UNIFORM_BUFFER, render_pose_buffer_object_);
+    if (render_pose_buffer_fd) {
+      LOG_ALWAYS_FATAL_IF(!glBindSharedBufferQCOM);  // Extension is mandatory when an fd is supplied.
+      if (glBindSharedBufferQCOM)
+        glBindSharedBufferQCOM(GL_UNIFORM_BUFFER,
+                               sizeof(DisplaySurfaceMetadata),
+                               render_pose_buffer_fd.Get());
+      else
+        ALOGE("Error: Missing gralloc buffer extension");  // NOTE(review): unreachable — the LOG_ALWAYS_FATAL_IF above aborts first.
+      CHECK_GL();
+    }
+    glBindBuffer(GL_UNIFORM_BUFFER, 0);  // Leave no uniform buffer bound.
+  }
+
+ ~RenderPoseBufferObject() { glDeleteBuffers(1, &render_pose_buffer_object_); }
+
+ GLuint object_id() const { return render_pose_buffer_object_; }
+
+ private:
+ // Render pose buffer object. This contains an array of poses that corresponds
+ // with the surface buffers.
+ GLuint render_pose_buffer_object_;
+
+ RenderPoseBufferObject(const RenderPoseBufferObject&) = delete;
+ void operator=(const RenderPoseBufferObject&) = delete;
+};
+
+HeadMountMetrics CreateDefaultHeadMountMetrics() {
+ const bool enable_distortion =
+ property_get_bool(kDisableLensDistortionProp, 0) == 0;
+ return enable_distortion ? CreateHeadMountMetrics()
+ : CreateUndistortedHeadMountMetrics();
+}
+
+Compositor::Compositor()
+ : head_mount_metrics_(CreateDefaultHeadMountMetrics()),
+ display_(0),
+ config_(0),
+ surface_(0),
+ context_(0),
+ active_render_target_(0),
+ is_render_direct_(false),
+ compute_fbo_(0),
+ compute_fbo_texture_(0),
+ hmd_metrics_requires_update_(false),
+ eds_pose_capture_enabled_(false) {}
+
+Compositor::~Compositor() {}
+
+bool Compositor::Initialize(const DisplayMetrics& display_metrics) {
+ ATRACE_NAME("Compositor::Initialize");
+ if (!InitializeEGL())
+ return false;
+
+ display_metrics_ = display_metrics;
+ const int width = display_metrics_.GetSizePixels().x();
+ const int height = display_metrics_.GetSizePixels().y();
+
+ render_target_[0].Initialize(width, height);
+ render_target_[1].Initialize(width, height);
+
+ // EDS:
+ GpuProfiler::Get()->SetEnableGpuTracing(true);
+
+ eds_pose_capture_enabled_ = property_get_bool(kEnableEdsPoseSaveProp, 0) == 1;
+
+ CheckAndUpdateHeadMountMetrics(true);
+
+ debug_hud_.reset(new DebugHudView(*composite_hmd_.get()));
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+
+ return true;
+}
+
+void Compositor::UpdateHeadMountMetrics(
+ const HeadMountMetrics& head_mount_metrics) {
+ // Recalculating the mesh must be done in the draw loop, defer until then.
+ std::lock_guard<std::mutex> _lock(mutex_);
+ head_mount_metrics_ = head_mount_metrics;
+ hmd_metrics_requires_update_ = true;
+}
+
+void Compositor::CheckAndUpdateHeadMountMetrics(bool force_update) {
+ std::lock_guard<std::mutex> _lock(mutex_);
+ if (hmd_metrics_requires_update_ || force_update) {
+ hmd_metrics_requires_update_ = false;
+ composite_hmd_.reset(
+ new CompositeHmd(head_mount_metrics_, display_metrics_));
+ CHECK_GL();
+ eds_renderer_.reset(new DistortionRenderer(
+ *composite_hmd_.get(), display_metrics_.GetSizePixels(),
+ kDistortionMeshResolution, true, false, false, true, true));
+ }
+}
+
+bool Compositor::InitializeEGL() {
+ ATRACE_NAME("Compositor::InitializeEGL");
+ display_ = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+ if (!display_) {
+ ALOGE("Failed to get egl display\n");
+ return false;
+ }
+
+ eglInitialize(display_, nullptr, nullptr);
+
+ EGLint attribs[] = {
+ EGL_BUFFER_SIZE,
+ 32,
+ EGL_ALPHA_SIZE,
+ 0,
+ EGL_BLUE_SIZE,
+ 8,
+ EGL_RED_SIZE,
+ 8,
+ EGL_GREEN_SIZE,
+ 8,
+ EGL_DEPTH_SIZE,
+ 0,
+ EGL_SURFACE_TYPE,
+ EGL_WINDOW_BIT,
+ EGL_RENDERABLE_TYPE,
+ EGL_OPENGL_ES2_BIT,
+ EGL_NONE,
+ };
+
+ EGLint num_configs;
+ if (!eglChooseConfig(display_, attribs, &config_, 1, &num_configs)) {
+ ALOGE("Couldn't find config");
+ return false;
+ }
+
+ std::unique_ptr<DummyNativeWindow> window(new DummyNativeWindow());
+
+ surface_ = eglCreateWindowSurface(display_, config_, window.get(), nullptr);
+ if (surface_ == EGL_NO_SURFACE) {
+ ALOGE("Failed to create egl surface");
+ return false;
+ }
+ window.release();
+
+ EGLint context_attribs[] = {EGL_CONTEXT_MAJOR_VERSION,
+ 3,
+ EGL_CONTEXT_MINOR_VERSION,
+ 1,
+ EGL_CONTEXT_PRIORITY_LEVEL_IMG,
+ EGL_CONTEXT_PRIORITY_HIGH_IMG,
+ EGL_NONE};
+ context_ = eglCreateContext(display_, config_, nullptr, context_attribs);
+ if (!eglMakeCurrent(display_, surface_, surface_, context_)) {
+ ALOGE("Unable to create GLESv2 context");
+ return false;
+ }
+
+ load_gl_extensions();
+
+ glEnable(BINNING_CONTROL_HINT_QCOM);
+ glHint(BINNING_CONTROL_HINT_QCOM, RENDER_DIRECT_TO_FRAMEBUFFER_QCOM);
+ is_render_direct_ = true;
+ CHECK_GL();
+
+ // Initialize the placeholder 1x1 framebuffer that we bind during compute
+ // shader instances to avoid accesses to other framebuffers.
+ glGenFramebuffers(1, &compute_fbo_);
+ glGenTextures(1, &compute_fbo_texture_);
+ glBindFramebuffer(GL_FRAMEBUFFER, compute_fbo_);
+ glBindTexture(GL_TEXTURE_2D, compute_fbo_texture_);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ nullptr);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ compute_fbo_texture_, 0);
+ CHECK_GL();
+ CHECK_GL_FBO();
+ glBindTexture(GL_TEXTURE_2D, 0);
+ glBindFramebuffer(GL_FRAMEBUFFER, 0);
+
+ return true;
+}
+
+void Compositor::Shutdown() {
+ render_target_[0].Destroy();
+ render_target_[1].Destroy();
+ layers_.clear();
+ glDeleteFramebuffers(1, &compute_fbo_);
+ glDeleteTextures(1, &compute_fbo_texture_);
+
+ debug_hud_.reset();
+ eds_renderer_.reset();
+
+ if (context_) {
+ eglDestroyContext(display_, context_);
+ context_ = 0;
+ }
+
+ if (surface_ != EGL_NO_SURFACE) {
+ eglDestroySurface(display_, surface_);
+ surface_ = EGL_NO_SURFACE;
+ }
+}
+
+void Compositor::RemoveAllBuffers() { layers_.clear(); }
+
+void Compositor::UpdateSurfaces(
+ const std::vector<std::shared_ptr<DisplaySurface>>& surfaces) {
+ // Delete the removed surfaces.
+ layers_.erase(
+ std::remove_if(layers_.begin(), layers_.end(),
+ [&surfaces](const AppFrame& layer) {
+ for (const auto& surface : surfaces)
+ if (surface->surface_id() == layer.surface_id())
+ return false;
+ return true;
+ }),
+ layers_.end());
+ // New surfaces are added on-demand as buffers are posted.
+}
+
+Compositor::AppFrame::AppFrame()
+ : surface_id_(-1),
+ blur_(0.0f),
+ z_order_(0),
+ vertical_flip_(false),
+ enable_cac_(true),
+ render_buffer_index_(0) {}
+
+Compositor::AppFrame::~AppFrame() {}
+
+const Compositor::Texture* Compositor::AppFrame::GetGlTextureId(
+ EGLDisplay display, int index) {
+ auto buffer_consumer = buffer_.buffer();
+ if (!buffer_consumer) {
+ return nullptr;
+ }
+ auto texture_it = std::find_if(
+ textures_.begin(), textures_.end(),
+ [buffer_consumer, index](const std::shared_ptr<Texture>& t) {
+ return t->consumer() == buffer_consumer && t->index() == index;
+ });
+
+ if (texture_it != textures_.end()) {
+ return (*texture_it).get();
+ }
+
+ textures_.push_back(
+ std::make_shared<Texture>(buffer_consumer, display, index));
+ if (!textures_.back()->Initialize()) {
+ textures_.pop_back();
+ return nullptr;
+ }
+ return textures_.back().get();
+}
+
+bool Compositor::AppFrame::UpdateSurface(
+    const std::shared_ptr<DisplaySurface>& surface) {  // Returns true when the z-order changed, i.e. layers_ must be re-sorted.
+  int surface_id = surface->surface_id();
+  float blur = surface->manager_blur();
+  bool need_sort = false;
+  if (z_order_ != surface->layer_order()) {
+    need_sort = true;
+    z_order_ = surface->layer_order();
+  }
+
+  surface_id_ = surface_id;
+  if (!render_pose_buffer_object_) {  // Lazily create the pose UBO on first update.
+    render_pose_buffer_object_.reset(
+        new RenderPoseBufferObject(surface->GetMetadataBufferFd()));
+  }
+
+  blur_ = blur;
+  vertical_flip_ =
+      !!(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_VERTICAL_FLIP);
+  enable_cac_ =
+      !(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_CAC);
+
+  AcquiredBuffer skipped_buffer;
+  AcquiredBuffer buffer =
+      surface->AcquireNewestAvailableBuffer(&skipped_buffer);
+  if (!skipped_buffer.IsEmpty()) {  // An older posted buffer was dropped to catch up.
+    DebugHudData::data.SkipLayerFrame(z_order_);
+    ATRACE_NAME("DropToCatchUp");
+    ATRACE_ASYNC_END("BufferPost", skipped_buffer.buffer()->id());
+  }
+  if (!buffer.IsEmpty()) {
+    DebugHudData::data.AddLayerFrame(z_order_);
+    // Buffer was already ready, so we don't need to wait on the fence.
+    buffer.ClaimAcquireFence().Close();
+    ATRACE_ASYNC_END("BufferPost", buffer.buffer()->id());
+
+    render_buffer_index_ = surface->GetRenderBufferIndex(buffer.buffer()->id());
+
+#ifdef TRACE  // NOTE(review): 'data' is declared only when TRACE is a defined macro.
+    const volatile DisplaySurfaceMetadata* data =
+        surface->GetMetadataBufferPtr();
+#endif
+    ALOGE_IF(TRACE, "read pose index %d %f %f", render_buffer_index_,  // NOTE(review): references 'data' unconditionally; compiles only because the build defines -DTRACE=0.
+             data->orientation[render_buffer_index_][0],
+             data->orientation[render_buffer_index_][1]);
+
+    // Move the new buffer over the old. AcquiredBuffer releases the old one.
+    buffer_ = std::move(buffer);
+  }
+  return need_sort;
+}
+
+void Compositor::AppFrame::UpdateVideoMeshSurface(
+ const std::shared_ptr<DisplaySurface>& surface) {
+ // Update |video_compositors_| with |video_surface|. Note that
+ // |UpdateVideoMeshSurface| should only be called on the PostThread before
+ // |DrawFrame| is called. Thus, no synchronization is required for
+ // |video_compositors_|.
+ if (!surface->video_mesh_surfaces_updated())
+ return;
+
+ // TODO(jwcai) The following loop handles adding new surfaces; video mesh
+ // removal logic shall be handled by listening to |OnChannelClose| event from
+ // DisplayService.
+ for (const auto& video_surface : surface->GetVideoMeshSurfaces()) {
+ // Here we assume number of |video_surface|s is relatively small, thus, the
+ // merge should be efficient enough.
+ auto video_compositor_it = std::find_if(
+ video_compositors_.begin(), video_compositors_.end(),
+ [video_surface](const std::shared_ptr<VideoCompositor>& c) {
+ return c->surface_id() == video_surface->surface_id();
+ });
+
+ if (video_compositor_it == video_compositors_.end()) {
+ // This video surface is new, create a new VideoCompositor.
+ video_compositors_.push_back(std::make_shared<VideoCompositor>(
+ video_surface, surface->GetMetadataBufferPtr()));
+ } else {
+ // There is a compositor in |video_compositors_| is already set up for
+ // this |video_surface|.
+ ALOGW("Duplicated video_mesh_surface: surface_id=%d",
+ video_surface->surface_id());
+ }
+ }
+}
+
+void Compositor::AppFrame::ResetBlurrers() { blurrers_.clear(); }
+
+void Compositor::AppFrame::AddBlurrer(Blur* blurrer) {
+ blurrers_.emplace_back(blurrer);
+}
+
+void Compositor::PostBuffer(const std::shared_ptr<DisplaySurface>& surface) {
+ int surface_id = surface->surface_id();
+
+ ALOGD_IF(TRACE, "Post surface %d", surface_id);
+
+ auto layer_it = std::find_if(layers_.begin(), layers_.end(),
+ [surface_id](const AppFrame& frame) {
+ return frame.surface_id() == surface_id;
+ });
+
+ bool need_sort = false;
+ if (layer_it == layers_.end()) {
+ layers_.push_back(AppFrame());
+ layer_it = layers_.end() - 1;
+ need_sort = true;
+ }
+
+ need_sort |= layer_it->UpdateSurface(surface);
+ layer_it->UpdateVideoMeshSurface(surface);
+
+ if (need_sort) {
+ std::stable_sort(layers_.begin(), layers_.end());
+ }
+}
+
+std::vector<uint8_t> Compositor::ReadLayerPixels(size_t index, int* width,
+ int* height) {
+ if (index >= layers_.size()) {
+ return {};
+ }
+
+ const Texture* texture = layers_[index].GetGlTextureId(display_, 0);
+ if (!texture) {
+ return {};
+ }
+
+ *width = texture->size()[0];
+ *height = texture->size()[1];
+ return ReadTextureRGBA(texture->texture_id(), *width, *height);
+}
+
+std::vector<uint8_t> Compositor::ReadBufferPixels(const IonBuffer* buffer) {
+ android::sp<TemporaryNativeBuffer> native_buffer =
+ new TemporaryNativeBuffer(buffer);
+
+ // Finish to make sure the GL driver has completed drawing of prior FBOs.
+ // Since we are creating an EGL image here, the driver will not know that
+ // there is a dependency on earlier GL draws.
+ glFinish();
+
+ EGLImageKHR image = eglCreateImageKHR(
+ display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<ANativeWindowBuffer*>(native_buffer.get()), nullptr);
+ if (!image) {
+ ALOGE("Failed to create EGLImage\n");
+ return {};
+ }
+
+ GLuint texture_id;
+ glGenTextures(1, &texture_id);
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, texture_id);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+ int width = buffer->width();
+ int height = buffer->height();
+ std::vector<uint8_t> data = ReadTextureRGBA(texture_id, width, height);
+
+ glBindTexture(GL_TEXTURE_2D, 0);
+ glDeleteTextures(1, &texture_id);
+ eglDestroyImageKHR(display_, image);
+ return data;
+}
+
+bool Compositor::DrawFrame(uint32_t target_vsync_count,
+ LocalHandle* buffer_fence_fd) {
+ CheckAndUpdateHeadMountMetrics(false);
+
+ ATRACE_NAME("Compositor::DrawFrame");
+ GpuProfiler::Get()->PollGlTimerQueries();
+
+ if (buffer_fence_fd)
+ buffer_fence_fd->Close();
+
+ int num_layers = 0;
+ const int kMaxLayers = 4;
+ GLuint texture_id[2][kMaxLayers] = {{0}};
+ GLuint render_pose_buffer_id[kMaxLayers] = {0};
+ uint32_t render_buffer_index[kMaxLayers] = {0};
+ bool vertical_flip[kMaxLayers] = {false};
+ bool separate_eye_textures[kMaxLayers] = {false};
+ bool enable_cac[kMaxLayers] = {};
+ CHECK_GL();
+ for (auto& layer : layers_) {
+ if (!layer.buffer().buffer()) {
+ ATRACE_NAME("no_buffer");
+ continue;
+ }
+
+ // Extract surface parameters.
+ render_buffer_index[num_layers] = layer.render_buffer_index();
+ render_pose_buffer_id[num_layers] =
+ layer.render_pose_buffer_object()->object_id();
+ vertical_flip[num_layers] = layer.vertical_flip();
+ enable_cac[num_layers] =
+ head_mount_metrics_.supports_chromatic_aberration_correction() &&
+ layer.enable_cac();
+
+ // Extract per-eye textures. These may be separate or joined (atlased).
+ vec2i size(0, 0);
+ int view_count = layer.buffer().buffer()->slice_count();
+ ALOGE_IF(view_count > 2, "Error: more than 2 views not supported");
+ view_count = std::min(2, view_count);
+ separate_eye_textures[num_layers] = (view_count > 1);
+ bool is_missing_texture = false;
+ for (int eye = 0; eye < 2; ++eye) {
+ // If view_count is 1, each eye texture is the 0th.
+ int view_index = (view_count == 2) ? eye : 0;
+ const Texture* texture = layer.GetGlTextureId(display_, view_index);
+ // Texture will be null if the EGL image creation fails (hopefully never).
+ if (!texture) {
+ is_missing_texture = true;
+ break;
+ }
+ // All views are currently expected to have the same size.
+ size = texture->size();
+ texture_id[eye][num_layers] = texture->texture_id();
+ }
+ if (is_missing_texture) {
+ continue;
+ }
+
+ // Perform blur if requested.
+ if (fabs(layer.blur()) > 0.001f) {
+ // No need for CAC on blurred layers.
+ enable_cac[num_layers] = false;
+ if (layer.blurrer_count() < 1 || layer.blurrer(0)->width() != size[0] ||
+ layer.blurrer(0)->height() != size[1]) {
+ // Blur is created with the left eye texture, but the same instance
+ // can be used for the right eye as well.
+ layer.ResetBlurrers();
+ layer.AddBlurrer(new Blur(size[0], size[1], texture_id[0][num_layers],
+ GL_TEXTURE_2D, GL_TEXTURE_2D, true, display_,
+ view_count));
+ }
+ // Reset blur instances to prepare for drawing.
+ layer.blurrer(0)->StartFrame();
+ layer.blurrer(0)->set_scale(layer.blur());
+ // Perform blur and replace source texture with blurred output texture.
+ if (view_count == 1) {
+ // Single wide buffer for both eyes, blur both eyes in one operation.
+ texture_id[0][num_layers] = texture_id[1][num_layers] =
+ layer.blurrer(0)->DrawBlur(texture_id[0][num_layers]);
+ } else {
+ // Split eye buffers in a single frame, blur each framebuffer.
+ texture_id[0][num_layers] =
+ layer.blurrer(0)->DrawBlur(texture_id[0][num_layers]);
+ texture_id[1][num_layers] =
+ layer.blurrer(0)->DrawBlur(texture_id[1][num_layers]);
+ }
+ }
+
+ ++num_layers;
+ if (num_layers >= kMaxLayers)
+ break;
+ }
+
+ CHECK_GL();
+ // Set appropriate binning mode for the number of layers.
+ if (num_layers > 1 && is_render_direct_) {
+ is_render_direct_ = false;
+ glDisable(BINNING_CONTROL_HINT_QCOM);
+ } else if (num_layers <= 1 && !is_render_direct_) {
+ is_render_direct_ = true;
+ glEnable(BINNING_CONTROL_HINT_QCOM);
+ glHint(BINNING_CONTROL_HINT_QCOM, RENDER_DIRECT_TO_FRAMEBUFFER_QCOM);
+ }
+
+ // Workaround for GL driver bug that causes the currently bound FBO to be
+ // accessed during a compute shader pass (DoLateLatch below). Based on an
+ // analysis with systrace, the best pattern here was to run the compute shader
+ // with a *different* FBO than what will be drawn to afterward. So we bind
+ // a dummy 1x1 FBO here and discard it. If instead, the current render target
+ // is bound during the compute shader, the following draw calls will be forced
+ // into direct mode rendering.
+ glBindFramebuffer(GL_FRAMEBUFFER, compute_fbo_);
+ GLenum attachment = GL_COLOR_ATTACHMENT0;
+ glDiscardFramebufferEXT(GL_FRAMEBUFFER, 1, &attachment);
+
+ // Double buffer the render target. Get the render target we're drawing into,
+ // and update the active buffer to the next buffer.
+ RenderTarget& render_target = GetRenderTarget();
+ SetNextRenderTarget();
+
+ if (num_layers > 0) {
+ // This trace prints the EDS+Warp GPU overhead and prints every 5 seconds:
+ TRACE_GPU_PRINT("GPU EDS+Warp", 5 * 60);
+ CHECK_GL();
+ eds_renderer_->DoLateLatch(target_vsync_count, render_buffer_index,
+ render_pose_buffer_id, vertical_flip,
+ separate_eye_textures, num_layers);
+
+ render_target.BindFramebuffer();
+
+ // Discard to avoid unresolving the framebuffer during tiled rendering.
+ render_target.DiscardColorAttachment();
+
+ // For tiled mode rendering, we clear every frame to avoid garbage showing
+ // up in the parts of tiles that are not rendered.
+ if (!is_render_direct_) {
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ }
+
+ for (int eye = kLeftEye; eye <= kRightEye; ++eye) {
+ eds_renderer_->PrepGlState(static_cast<EyeType>(eye));
+ for (int layer_i = 0; layer_i < num_layers; ++layer_i) {
+ bool blend_with_previous = layer_i > 0;
+ uint32_t current_buffer_index = render_buffer_index[layer_i];
+
+ // Render video mesh in the background of each graphics layer.
+ layers_[layer_i].ForEachVideoCompositor([this, eye, layer_i,
+ current_buffer_index,
+ &blend_with_previous](
+ const std::shared_ptr<VideoCompositor>& video_compositor) mutable {
+ eds_renderer_->DrawVideoQuad(
+ static_cast<EyeType>(eye), layer_i,
+ video_compositor->GetActiveTextureId(display_),
+ video_compositor->GetTransform(eye, current_buffer_index));
+ blend_with_previous = true;
+ });
+
+ // Apply distortion to frame submitted from the app's GL context.
+ eds_renderer_->SetChromaticAberrationCorrectionEnabled(
+ enable_cac[layer_i]);
+ eds_renderer_->ApplyDistortionCorrectionToTexture(
+ static_cast<EyeType>(eye), &texture_id[eye][layer_i],
+ &vertical_flip[layer_i], &separate_eye_textures[layer_i], &layer_i,
+ 1, blend_with_previous, false);
+ }
+ }
+ eds_renderer_->ResetGlState(1);
+ CHECK_GL();
+ } else {
+ ALOGI("No buffers for compositing, clearing to black.");
+ render_target.BindFramebuffer();
+ glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+ glClear(GL_COLOR_BUFFER_BIT);
+ }
+
+ debug_hud_->Update();
+ debug_hud_->Draw();
+
+ LocalHandle fence_fd = CreateGLSyncAndFlush(display_);
+
+ if (buffer_fence_fd)
+ *buffer_fence_fd = std::move(fence_fd);
+
+ if (eds_pose_capture_enabled_) {
+ std::lock_guard<std::mutex> _lock(mutex_);
+ eds_renderer_->GetLastEdsPose(&eds_pose_capture_);
+ }
+
+ return true;
+}
+
+// Copies the most recently captured EDS pose into |out_data|. Returns false
+// (and logs an error) when EDS pose capture has not been enabled.
+bool Compositor::GetLastEdsPose(LateLatchOutput* out_data) {
+  if (!eds_pose_capture_enabled_) {
+    ALOGE("Eds pose capture is not enabled.");
+    return false;
+  }
+
+  // Guard against a concurrent capture update in DrawFrame.
+  std::lock_guard<std::mutex> _lock(mutex_);
+  *out_data = eds_pose_capture_;
+  return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/compositor.h b/libs/vr/libvrflinger/compositor.h
new file mode 100644
index 0000000..be26d31
--- /dev/null
+++ b/libs/vr/libvrflinger/compositor.h
@@ -0,0 +1,233 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
+
+#include <EGL/egl.h>
+#include <log/log.h>
+#include <utils/StrongPointer.h>
+
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <vector>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/display_metrics.h>
+#include <private/dvr/distortion_renderer.h>
+#include <private/dvr/frame_time_history.h>
+#include <private/dvr/ion_buffer.h>
+#include <private/dvr/native_buffer.h>
+
+#include "acquired_buffer.h"
+#include "video_compositor.h"
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+class Blur;
+class BufferConsumer;
+class CompositeHmd;
+class DebugHudView;
+class DisplaySurface;
+
+// This is a GPU compositor for software EDS and lens warp on buffers provided
+// by HardwareComposer.
+class Compositor {
+ public:
+  Compositor();
+  ~Compositor();
+
+  bool Initialize(const DisplayMetrics& display_metrics);
+  void UpdateHeadMountMetrics(const HeadMountMetrics& head_mount_metrics);
+  void Shutdown();
+
+  // Renders a frame with the latest buffers with EDS and warp applied.
+  // buffer_fence_fd can be used to get a fence for the rendered frame. It can
+  // be set to null if the fence isn't needed.
+  bool DrawFrame(uint32_t target_vsync_count,
+                 pdx::LocalHandle* buffer_fence_fd);
+
+  // Remove all buffers.
+  void RemoveAllBuffers();
+
+  // Synchronize compositor layers with the given surfaces.
+  void UpdateSurfaces(
+      const std::vector<std::shared_ptr<DisplaySurface>>& surfaces);
+
+  // This must be called for each surface before DrawFrame is called.
+  void PostBuffer(const std::shared_ptr<DisplaySurface>& surface);
+
+  // Returns the buffer of the currently active render target.
+  std::shared_ptr<IonBuffer> GetBuffer() const {
+    return render_target_[active_render_target_].buffer();
+  }
+
+  // Returns the number of layers being rendered by the compositor.
+  size_t GetLayerCount() const { return layers_.size(); }
+
+  // Returns the source buffer at the given layer index or nullptr if none is
+  // available.
+  std::shared_ptr<BufferConsumer> PeekAtLayer(size_t index) const {
+    if (index >= GetLayerCount())
+      return nullptr;
+    return layers_[index].buffer().buffer();
+  }
+
+  // Expensive operation to transfer the pixels of the given layer index into
+  // unformatted memory and return as a RGBA buffer.
+  // On success, returns non-zero sized vector and sets width and height.
+  // On failure, returns empty vector.
+  std::vector<uint8_t> ReadLayerPixels(size_t index, int* width, int* height);
+
+  // Expensive operation to transfer the pixels of the given buffer into
+  // unformatted memory and return as a RGBA buffer.
+  // On success, returns non-zero sized vector.
+  // On failure, returns empty vector.
+  std::vector<uint8_t> ReadBufferPixels(const IonBuffer* buffer);
+
+  // Copies the last captured EDS pose into |out_data|; returns false if EDS
+  // pose capture is not enabled.
+  bool GetLastEdsPose(LateLatchOutput* out_data);
+
+  const HeadMountMetrics& head_mount_metrics() const {
+    return head_mount_metrics_;
+  }
+
+ private:
+  class Texture;
+  class RenderPoseBufferObject;
+
+  // A rendered frame from an application.
+  class AppFrame {
+   public:
+    AppFrame();
+    ~AppFrame();
+
+    AppFrame(AppFrame&& other) = default;
+    AppFrame& operator=(AppFrame&&) = default;
+
+    // Gets a GL texture object for the current buffer. The resulting texture
+    // object will be cached for future calls. Returns a pointer for temporary
+    // access - not meant to hold on to.
+    const Texture* GetGlTextureId(EGLDisplay display, int index);
+
+    // Orders frames by z-order for layer sorting.
+    bool operator<(const AppFrame& rhs) const {
+      return z_order_ < rhs.z_order_;
+    }
+    int z_order() const { return z_order_; }
+    // Return true if this surface z order has been changed.
+    bool UpdateSurface(const std::shared_ptr<DisplaySurface>& surface);
+    void UpdateVideoMeshSurface(const std::shared_ptr<DisplaySurface>& surface);
+    void ResetBlurrers();
+    void AddBlurrer(Blur* blurrer);
+
+    const AcquiredBuffer& buffer() const { return buffer_; }
+    int surface_id() const { return surface_id_; }
+    float blur() const { return blur_; }
+    bool vertical_flip() const { return vertical_flip_; }
+    bool enable_cac() const { return enable_cac_; }
+    size_t blurrer_count() const { return blurrers_.size(); }
+    // Returns the blurrer at index |i|, or nullptr if |i| is out of range.
+    // (The bounds check previously read blurrers_.size() < i, which allowed
+    // an out-of-bounds access when i == size().)
+    Blur* blurrer(size_t i) {
+      return i < blurrers_.size() ? blurrers_[i].get() : nullptr;
+    }
+    uint32_t render_buffer_index() const { return render_buffer_index_; }
+    const RenderPoseBufferObject* render_pose_buffer_object() const {
+      return render_pose_buffer_object_.get();
+    }
+
+    // Invokes |action| on each active video mesh compositor.
+    template <class A>
+    void ForEachVideoCompositor(A action) const {
+      for (auto& c : video_compositors_) {
+        action(c);
+      }
+    }
+
+   private:
+    int surface_id_;
+    float blur_;
+    int z_order_;
+    bool vertical_flip_;
+    bool enable_cac_;
+    std::vector<std::unique_ptr<Blur>> blurrers_;
+    AcquiredBuffer buffer_;
+    std::vector<std::shared_ptr<Texture>> textures_;
+    uint32_t render_buffer_index_;
+    std::unique_ptr<RenderPoseBufferObject> render_pose_buffer_object_;
+
+    // Active video mesh compositors
+    std::vector<std::shared_ptr<VideoCompositor>> video_compositors_;
+
+    AppFrame(const AppFrame& other) = delete;
+    AppFrame& operator=(const AppFrame&) = delete;
+  };
+
+  // An EGL-image-backed framebuffer the compositor renders into.
+  class RenderTarget {
+   public:
+    RenderTarget();
+    ~RenderTarget();
+
+    void Initialize(int width, int height);
+    void Destroy();
+    void BindFramebuffer();
+    void DiscardColorAttachment();
+
+    std::shared_ptr<IonBuffer> buffer() const { return buffer_; }
+
+   private:
+    std::shared_ptr<IonBuffer> buffer_;
+    android::sp<NativeBuffer> native_buffer_;
+
+    GLuint buffer_texture_id_;
+    GLuint buffer_framebuffer_id_;
+    EGLImageKHR buffer_image_;
+  };
+
+  Compositor(const Compositor&) = delete;
+  void operator=(const Compositor&) = delete;
+
+  bool InitializeEGL();
+
+  void UpdateHudToggle();
+  void PrintStatsHud();
+  void CheckAndUpdateHeadMountMetrics(bool force_update);
+
+  // Returns the currently active (double buffered) render target.
+  RenderTarget& GetRenderTarget() {
+    return render_target_[active_render_target_];
+  }
+
+  // Flips to the other render target for the next frame.
+  void SetNextRenderTarget() {
+    active_render_target_ = (active_render_target_ + 1) & 1;
+  }
+
+  std::vector<AppFrame> layers_;
+
+  DisplayMetrics display_metrics_;
+  HeadMountMetrics head_mount_metrics_;
+
+  EGLDisplay display_;
+  EGLConfig config_;
+  EGLSurface surface_;
+  EGLContext context_;
+  int active_render_target_;
+  RenderTarget render_target_[2];
+  bool is_render_direct_;
+
+  // FBO for compute shader.
+  GLuint compute_fbo_;
+  GLuint compute_fbo_texture_;
+
+  std::unique_ptr<DebugHudView> debug_hud_;
+
+  // EDS:
+  std::unique_ptr<CompositeHmd> composite_hmd_;
+  bool hmd_metrics_requires_update_;
+  std::unique_ptr<DistortionRenderer> eds_renderer_;
+
+  bool eds_pose_capture_enabled_;
+  // Guards eds_pose_capture_ against concurrent access.
+  std::mutex mutex_;
+  LateLatchOutput eds_pose_capture_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_COMPOSITOR_H_
diff --git a/libs/vr/libvrflinger/debug_hud_data.cpp b/libs/vr/libvrflinger/debug_hud_data.cpp
new file mode 100644
index 0000000..d387bba
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_data.cpp
@@ -0,0 +1,9 @@
+#include "debug_hud_data.h"
+
+namespace android {
+namespace dvr {
+
+// Storage for the global debug HUD stats singleton declared in
+// debug_hud_data.h.
+DebugHudData DebugHudData::data;
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/debug_hud_data.h b/libs/vr/libvrflinger/debug_hud_data.h
new file mode 100644
index 0000000..778169d
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_data.h
@@ -0,0 +1,110 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cstddef>
+
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/frame_time_history.h>
+
+namespace android {
+namespace dvr {
+
+// Tracks debug stats for the displayd debug HUD. Unless otherwise noted,
+// there is no synchronization of data accesses to avoid performance impact.
+// All accesses to this data are on the displayd HWC post thread. Accesses from
+// other threads will need to be duly protected from races.
+// This is a lightweight struct to make it easy to add and remove
+// tracking data.
+struct DebugHudData {
+  // Maximum supported layers for the debug HUD.
+  enum { kMaxLayers = 4 };
+
+  // The global singleton HUD data instance.
+  static DebugHudData data;
+
+  // Tracks framerate and skipped frames.
+  struct FrameStats {
+    // Records the interval since the previous frame into the rolling history.
+    void AddFrame() {
+      int64_t now = GetSystemClockNs();
+      frame_time.AddSample(now - last_frame_ts);
+      last_frame_ts = now;
+    }
+
+    // Records a frame that arrived but was dropped.
+    void SkipFrame() {
+      AddFrame();
+      ++drops;
+    }
+
+    int drops = 0;
+    int64_t last_frame_ts = 0;
+    FrameTimeHistory frame_time;
+  };
+
+  // Debug data for compositor layers (applications, system UI, etc).
+  struct LayerData {
+    // Clears both the stats and the cached layer configuration.
+    void Reset() {
+      ResetStats();
+      width = 0;
+      height = 0;
+      is_separate = false;
+    }
+
+    // Clears only the accumulated drop count.
+    void ResetStats() { frame_stats.drops = 0; }
+
+    FrameStats frame_stats;
+    int width = 0;
+    int height = 0;
+    bool is_separate = false;
+  };
+
+  // Resets the stats.
+  void ResetStats() {
+    hwc_frame_stats.drops = 0;
+    hwc_latency = 0;
+    for (auto& l : layer_data)
+      l.ResetStats();
+  }
+
+  // Resets the layer configuration.
+  void ResetLayers() {
+    num_layers = 0;
+    for (auto& l : layer_data)
+      l.Reset();
+  }
+
+  // Tracks a frame arrival for the given layer. Layers at or beyond
+  // kMaxLayers are ignored.
+  void AddLayerFrame(size_t layer) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].frame_stats.AddFrame();
+    }
+  }
+
+  // Tracks a frame skip/drop for the given layer.
+  void SkipLayerFrame(size_t layer) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].frame_stats.SkipFrame();
+    }
+  }
+
+  // Sets the resolution and other details of the layer.
+  void SetLayerInfo(size_t layer, int width, int height, bool is_separate) {
+    if (layer < kMaxLayers) {
+      num_layers = std::max(layer + 1, num_layers);
+      layer_data[layer].width = width;
+      layer_data[layer].height = height;
+      layer_data[layer].is_separate = is_separate;
+    }
+  }
+
+  // Frame stats for the hardware composer posts.
+  FrameStats hwc_frame_stats;
+  // Most recent HWC post latency in nanoseconds.
+  int64_t hwc_latency = 0;
+  // Number of layers observed so far (at most kMaxLayers).
+  size_t num_layers = 0;
+  LayerData layer_data[kMaxLayers];
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_DATA_H_
diff --git a/libs/vr/libvrflinger/debug_hud_view.cpp b/libs/vr/libvrflinger/debug_hud_view.cpp
new file mode 100644
index 0000000..4936ac6
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_view.cpp
@@ -0,0 +1,91 @@
+#include "debug_hud_view.h"
+
+#include <dvr/pose_client.h>
+
+#include "debug_hud_data.h"
+
+namespace android {
+namespace dvr {
+
+// Caches display geometry from |hmd| and opens a pose client used to detect
+// the upside-down HUD toggle gesture.
+DebugHudView::DebugHudView(const CompositeHmd& hmd) {
+  pose_client_ = dvrPoseCreate();
+
+  const auto& metrics = hmd.GetDisplayMetrics();
+  display_size_ = metrics.GetSizePixels();
+  const vec2 size_meters = metrics.GetSizeMeters();
+  const float max_extent = std::max(size_meters[0], size_meters[1]);
+  inter_lens_dist_screen_space_ =
+      2.0f * hmd.GetHeadMountMetrics().GetInterLensDistance() / max_extent;
+}
+
+// Releases the pose client, if one was successfully created.
+DebugHudView::~DebugHudView() {
+  if (pose_client_ != nullptr) {
+    dvrPoseDestroy(pose_client_);
+    pose_client_ = nullptr;
+  }
+}
+
+// Polls the current head pose and toggles the HUD after the headset has been
+// held upside down for many consecutive updates.
+void DebugHudView::Update() {
+  // Check for gesture that enables the debug stats HUD.
+  if (!pose_client_)
+    return;
+  DvrPoseAsync pose;
+  dvrPoseGet(pose_client_, 0, &pose);
+  // Convert the packed pose orientation into a quaternion.
+  float32x4_t q = pose.orientation;
+  quat orientation(q[3], q[0], q[1], q[2]);
+  // Head "up" direction; a strongly negative y means roughly upside down.
+  vec3 up = orientation * vec3(0, 1, 0);
+  if (up[1] < -0.8f) {
+    ++switch_timer_;
+  } else {
+    switch_timer_ = 0;
+  }
+  // A few seconds upside down => toggle stats HUD. The threshold counts calls
+  // to Update(), presumably one per frame -- TODO confirm.
+  if (switch_timer_ > 200) {
+    switch_timer_ = 0;
+    enabled_ = !enabled_;
+    DebugHudData::data.ResetStats();
+    // NOTE(review): informational message; ALOGI may be more appropriate.
+    ALOGE("Toggle debug stats HUD: %s", enabled_ ? "ON" : "OFF");
+  }
+}
+
+// Renders the stats HUD on top of the current framebuffer when enabled.
+void DebugHudView::Draw() {
+  if (!enabled_)
+    return;
+  // Lazily create the text renderer with the display dimensions.
+  if (!debug_text_)
+    debug_text_.reset(new DebugText(400, display_size_[0], display_size_[1]));
+
+  const DebugHudData& data = DebugHudData::data;
+  // Format one fixed-size line of stats per layer into a scratch buffer.
+  const size_t layer_char_count = 50;
+  char layer_data[DebugHudData::kMaxLayers][layer_char_count];
+  for (size_t i = 0; i < data.num_layers; ++i) {
+    float fps = data.layer_data[i].frame_stats.frame_time.GetAverageFps();
+    snprintf(layer_data[i], layer_char_count,
+             "Layer %d %dx%d%s FPS: %.2f Drops: %d\n", static_cast<int>(i),
+             data.layer_data[i].width, data.layer_data[i].height,
+             data.layer_data[i].is_separate ? "x2" : "", fps,
+             data.layer_data[i].frame_stats.drops);
+  }
+
+  float hwc_fps = data.hwc_frame_stats.frame_time.GetAverageFps();
+
+  // Assemble the final HUD text; strncat bounds the appends so the fixed
+  // buffer cannot overflow (excess layer lines are truncated).
+  char text[400];
+  float hwc_latency_ms = static_cast<float>(data.hwc_latency) / 1000000.0f;
+  snprintf(text, sizeof(text), "HWC FPS: %.2f Latency: %.3f ms Skips: %d\n",
+           hwc_fps, hwc_latency_ms, data.hwc_frame_stats.drops);
+
+  for (size_t i = 0; i < data.num_layers; ++i) {
+    strncat(text, layer_data[i], sizeof(text) - strlen(text) - 1);
+  }
+
+  // Ensure text termination.
+  text[sizeof(text) - 1] = '\0';
+
+  glViewport(0, 0, display_size_[0], display_size_[1]);
+  glEnable(GL_BLEND);
+  // No stereo, because you can see the HUD OK in one eye. Stereo actually
+  // makes it more difficult to focus sometimes. To enable stereo:
+  // replace the second to last parameter with inter_lens_dist_screen_space_.
+  debug_text_->Draw(0.0f, -0.7f * inter_lens_dist_screen_space_, text, 0.0f, 1);
+  glDisable(GL_BLEND);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/debug_hud_view.h b/libs/vr/libvrflinger/debug_hud_view.h
new file mode 100644
index 0000000..50f38a8
--- /dev/null
+++ b/libs/vr/libvrflinger/debug_hud_view.h
@@ -0,0 +1,48 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
+
+#include <stdint.h>
+
+#include <utils/Log.h>
+
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/graphics/debug_text.h>
+
+struct DvrPose;
+
+namespace android {
+namespace dvr {
+
+class CompositeHmd;
+
+// The view and the controller for the displayd debug HUD.
+// The HUD is enabled and disabled by internally tracking the head pose.
+// When the head pose is upside down for ~3 seconds, the enabled state toggles.
+// See DebugHudData for the data that is reported.
+class DebugHudView {
+ public:
+  DebugHudView(const CompositeHmd& hmd);
+  ~DebugHudView();
+
+  // Updates HUD state.
+  void Update();
+
+  // Draws HUD into the current framebuffer if it is currently enabled.
+  void Draw();
+
+ private:
+  DebugHudView(const DebugHudView&) = delete;
+  DebugHudView& operator=(const DebugHudView&) = delete;
+
+  // Pose client used to detect the upside-down toggle gesture; may be null if
+  // creation failed.
+  DvrPose* pose_client_ = nullptr;
+  // Display size in pixels, cached from the HMD metrics at construction.
+  vec2i display_size_;
+  // Whether the HUD is currently shown.
+  bool enabled_ = false;
+  // Consecutive Update() calls with the headset upside down.
+  int switch_timer_ = 0;
+  // Twice the inter-lens distance normalized by the display's larger
+  // dimension in meters; used to position the HUD text.
+  float inter_lens_dist_screen_space_ = 0.0f;
+  // Lazily created text renderer.
+  std::unique_ptr<DebugText> debug_text_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DEBUG_HUD_VIEW_H_
diff --git a/libs/vr/libvrflinger/display_manager_service.cpp b/libs/vr/libvrflinger/display_manager_service.cpp
new file mode 100644
index 0000000..6730ba8
--- /dev/null
+++ b/libs/vr/libvrflinger/display_manager_service.cpp
@@ -0,0 +1,225 @@
+#include "display_manager_service.h"
+
+#include <pdx/channel_handle.h>
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/display_rpc.h>
+#include <sys/poll.h>
+
+#include <array>
+
+using android::pdx::Channel;
+using android::pdx::LocalChannelHandle;
+using android::pdx::Message;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::IfAnyOf;
+
+namespace {
+
+// As a first line of defense, the display manager endpoint is only accessible
+// to the user and group.
+
+// TODO(dnicoara): Remove read/write permission for others. This is in here just
+// to allow us to experiment with cast functionality from a plain old app.
+constexpr mode_t kDisplayManagerEndpointFileMode =
+    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+
+// Presumably caps the surfaces referenced per request -- not used in the
+// visible handlers; TODO confirm usage.
+constexpr size_t kMaxSurfacesPerRequest = 32;
+
+}  // anonymous namespace
+
+namespace android {
+namespace dvr {
+
+// Sets or clears POLLIN on this display manager's channel so the remote
+// display manager wakes up (or stops polling) for pending notifications.
+void DisplayManager::SetNotificationsPending(bool pending) {
+  // When pending, clear no bits and set POLLIN; otherwise clear POLLIN.
+  int ret = service_->ModifyChannelEvents(channel_id_, pending ? 0 : POLLIN,
+                                          pending ? POLLIN : 0);
+  // Fix: log message previously misnamed the function
+  // ("SetNotificationPending").
+  ALOGE_IF(ret < 0,
+           "DisplayManager::SetNotificationsPending: Failed to modify channel "
+           "events: %s",
+           strerror(-ret));
+}
+
+// Creates the display manager service endpoint and registers for display
+// surface change notifications from the display service.
+DisplayManagerService::DisplayManagerService(
+    const std::shared_ptr<DisplayService>& display_service)
+    : BASE("DisplayManagerService",
+           Endpoint::Create(DisplayManagerRPC::kClientPath,
+                            kDisplayManagerEndpointFileMode)),
+      display_service_(display_service) {
+  // NOTE(review): binds raw |this| into a callback held by the display
+  // service; the display service must not outlive this service -- TODO
+  // confirm lifetime ordering.
+  display_service_->SetDisplayConfigurationUpdateNotifier(
+      std::bind(&DisplayManagerService::OnDisplaySurfaceChange, this));
+}
+
+// Accepts a display manager client connection. Only one display manager may
+// be registered at a time; additional connections are rejected with EPERM.
+std::shared_ptr<pdx::Channel> DisplayManagerService::OnChannelOpen(
+    pdx::Message& message) {
+  // Prevent more than one display manager from registering at a time.
+  if (display_manager_)
+    REPLY_ERROR_RETURN(message, EPERM, nullptr);
+
+  display_manager_ =
+      std::make_shared<DisplayManager>(this, message.GetChannelId());
+  return display_manager_;
+}
+
+// Drops the registered display manager when its channel goes away so a new
+// one may connect.
+void DisplayManagerService::OnChannelClose(
+    pdx::Message& /*message*/, const std::shared_ptr<pdx::Channel>& channel) {
+  if (channel == display_manager_) {
+    display_manager_.reset();
+  }
+}
+
+// Dispatches display manager RPCs by opcode; anything unrecognized falls
+// through to the default pdx service handler. (The previous version computed
+// an unused |channel| local via static_pointer_cast; removed.)
+int DisplayManagerService::HandleMessage(pdx::Message& message) {
+  switch (message.GetOp()) {
+    case DisplayManagerRPC::GetSurfaceList::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::GetSurfaceList>(
+          *this, &DisplayManagerService::OnGetSurfaceList, message);
+      return 0;
+
+    case DisplayManagerRPC::GetSurfaceBuffers::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::GetSurfaceBuffers>(
+          *this, &DisplayManagerService::OnGetSurfaceBuffers, message);
+      return 0;
+
+    case DisplayManagerRPC::UpdateSurfaces::Opcode:
+      DispatchRemoteMethod<DisplayManagerRPC::UpdateSurfaces>(
+          *this, &DisplayManagerService::OnUpdateSurfaces, message);
+      return 0;
+
+    default:
+      return Service::DefaultHandleMessage(message);
+  }
+}
+
+// Builds a snapshot of every display surface's identity and attributes
+// (both client- and manager-owned sets) for the display manager, then clears
+// the pending-notification flag since the manager is now up to date.
+std::vector<DisplaySurfaceInfo> DisplayManagerService::OnGetSurfaceList(
+    pdx::Message& /*message*/) {
+  std::vector<DisplaySurfaceInfo> items;
+
+  display_service_->ForEachDisplaySurface([&items](
+      const std::shared_ptr<DisplaySurface>& surface) mutable {
+    DisplaySurfaceInfo item;
+
+    item.surface_id = surface->surface_id();
+    item.process_id = surface->process_id();
+    item.type = surface->type();
+    item.flags = 0;  // TODO(eieio)
+    // Attributes owned by the client application. Client blur is reported as
+    // a fixed 0.
+    item.client_attributes = DisplaySurfaceAttributes{
+        {DisplaySurfaceAttributeEnum::Visible,
+         DisplaySurfaceAttributeValue{surface->client_visible()}},
+        {DisplaySurfaceAttributeEnum::ZOrder,
+         DisplaySurfaceAttributeValue{surface->client_z_order()}},
+        {DisplaySurfaceAttributeEnum::Blur, DisplaySurfaceAttributeValue{0.f}}};
+    // Attributes owned by the display manager.
+    item.manager_attributes = DisplaySurfaceAttributes{
+        {DisplaySurfaceAttributeEnum::Visible,
+         DisplaySurfaceAttributeValue{surface->manager_visible()}},
+        {DisplaySurfaceAttributeEnum::ZOrder,
+         DisplaySurfaceAttributeValue{surface->manager_z_order()}},
+        {DisplaySurfaceAttributeEnum::Blur,
+         DisplaySurfaceAttributeValue{surface->manager_blur()}}};
+
+    items.push_back(item);
+  });
+
+  // The fact that we're in the message handler implies that display_manager_ is
+  // not nullptr. No check required, unless this service becomes multi-threaded.
+  display_manager_->SetNotificationsPending(false);
+
+  return items;
+}
+
+// Returns the consumer-side buffer channel handles for |surface_id|. Replies
+// with ENOENT if the surface does not exist, or propagates the GetConsumers
+// error on failure.
+std::vector<LocalChannelHandle> DisplayManagerService::OnGetSurfaceBuffers(
+    pdx::Message& message, int surface_id) {
+  std::shared_ptr<DisplaySurface> surface =
+      display_service_->GetDisplaySurface(surface_id);
+  if (!surface)
+    REPLY_ERROR_RETURN(message, ENOENT, {});
+
+  std::vector<LocalChannelHandle> consumers;
+  int ret = surface->GetConsumers(&consumers);
+  if (ret < 0) {
+    // Fix: log message previously named a nonexistent method
+    // ("OnGetDisplaySurfaceBuffers").
+    ALOGE(
+        "DisplayManagerService::OnGetSurfaceBuffers: Failed to get "
+        "consumers for surface %d: %s",
+        surface_id, strerror(-ret));
+    REPLY_ERROR_RETURN(message, -ret, {});
+  }
+
+  return consumers;
+}
+
+// Applies manager-side attribute updates (z-order, visibility, blur) to the
+// given surfaces and then rebuilds the active display surface configuration.
+// Returns -ENOENT as soon as a surface id is unknown (remaining updates are
+// skipped). Unknown attribute keys and incompatible value types are logged
+// and ignored.
+int DisplayManagerService::OnUpdateSurfaces(
+    pdx::Message& /*message*/,
+    const std::map<int, DisplaySurfaceAttributes>& updates) {
+  for (const auto& surface_update : updates) {
+    const int surface_id = surface_update.first;
+    const DisplaySurfaceAttributes& attributes = surface_update.second;
+
+    std::shared_ptr<DisplaySurface> surface =
+        display_service_->GetDisplaySurface(surface_id);
+
+    if (!surface)
+      return -ENOENT;
+
+    for (const auto& attribute : attributes) {
+      const auto& key = attribute.first;
+      const auto* variant = &attribute.second;
+      // Set when the variant's held type is not accepted for this attribute.
+      bool invalid_value = false;
+      switch (key) {
+        case DisplaySurfaceAttributeEnum::ZOrder:
+          invalid_value =
+              !IfAnyOf<int32_t>::Call(variant, [&surface](const auto& value) {
+                surface->ManagerSetZOrder(value);
+              });
+          break;
+        case DisplaySurfaceAttributeEnum::Visible:
+          invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+              variant, [&surface](const auto& value) {
+                surface->ManagerSetVisible(value);
+              });
+          break;
+        case DisplaySurfaceAttributeEnum::Blur:
+          invalid_value = !IfAnyOf<int32_t, int64_t, float>::Call(
+              variant, [&surface](const auto& value) {
+                surface->ManagerSetBlur(value);
+              });
+          break;
+        default:
+          ALOGW(
+              "DisplayManagerService::OnUpdateSurfaces: Attempt to set invalid "
+              "attribute %u on surface %d",
+              key, surface_id);
+          break;
+      }
+
+      if (invalid_value) {
+        ALOGW(
+            "DisplayManagerService::OnUpdateSurfaces: Failed to set display "
+            "surface attribute '%s' because of incompatible type: %d",
+            DisplaySurfaceAttributeEnum::ToString(key).c_str(),
+            variant->index());
+      }
+    }
+  }
+
+  // Reconfigure the display layers for any active surface changes.
+  display_service_->UpdateActiveDisplaySurfaces();
+  return 0;
+}
+
+// Invoked by the display service whenever display surfaces change. With a
+// display manager registered, flags a pending notification on its channel;
+// otherwise defaults every surface to visible and applies the change.
+void DisplayManagerService::OnDisplaySurfaceChange() {
+  if (display_manager_) {
+    display_manager_->SetNotificationsPending(true);
+  } else {
+    // If there isn't a display manager registered, default all display surfaces
+    // to visible.
+    display_service_->ForEachDisplaySurface(
+        [](const std::shared_ptr<DisplaySurface>& surface) {
+          surface->ManagerSetVisible(true);
+        });
+    display_service_->UpdateActiveDisplaySurfaces();
+  }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_manager_service.h b/libs/vr/libvrflinger/display_manager_service.h
new file mode 100644
index 0000000..46401fa
--- /dev/null
+++ b/libs/vr/libvrflinger/display_manager_service.h
@@ -0,0 +1,73 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
+
+#include <pdx/service.h>
+#include <private/dvr/display_rpc.h>
+
+#include "display_service.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayManagerService;
+
+// The display manager is a client of the display manager service. This class
+// represents the connected client that the display manager service sends
+// notifications to.
+class DisplayManager : public pdx::Channel {
+ public:
+  DisplayManager(DisplayManagerService* service, int channel_id)
+      : service_(service), channel_id_(channel_id) {}
+
+  // Channel id assigned by the service when the display manager connected.
+  int channel_id() const { return channel_id_; }
+
+  // Sets or clears the channel event mask to indicate pending events that the
+  // display manager on the other end of the channel should read and handle.
+  // When |pending| is true the POLLIN bit is set in the event mask; when
+  // |pending| is false the POLLIN bit is cleared in the event mask.
+  void SetNotificationsPending(bool pending);
+
+ private:
+  DisplayManager(const DisplayManager&) = delete;
+  void operator=(const DisplayManager&) = delete;
+
+  // Non-owning pointer to the service that created this channel object.
+  DisplayManagerService* service_;
+  int channel_id_;
+};
+
+// The display manager service marshalls state and events from the display
+// service to the display manager.
+class DisplayManagerService : public pdx::ServiceBase<DisplayManagerService> {
+ public:
+  // pdx::Service overrides: channel lifecycle and message dispatch.
+  std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& message) override;
+  void OnChannelClose(pdx::Message& message,
+                      const std::shared_ptr<pdx::Channel>& channel) override;
+  int HandleMessage(pdx::Message& message) override;
+
+ private:
+  friend BASE;
+
+  explicit DisplayManagerService(
+      const std::shared_ptr<DisplayService>& display_service);
+
+  // RPC handlers; see DisplayManagerRPC in display_rpc.h for the opcodes.
+  std::vector<DisplaySurfaceInfo> OnGetSurfaceList(pdx::Message& message);
+  std::vector<pdx::LocalChannelHandle> OnGetSurfaceBuffers(
+      pdx::Message& message, int surface_id);
+  int OnUpdateSurfaces(pdx::Message& message,
+                       const std::map<int, DisplaySurfaceAttributes>& updates);
+
+  // Called by the display service to indicate changes to display surfaces that
+  // the display manager should evaluate.
+  void OnDisplaySurfaceChange();
+
+  DisplayManagerService(const DisplayManagerService&) = delete;
+  void operator=(const DisplayManagerService&) = delete;
+
+  std::shared_ptr<DisplayService> display_service_;
+  // The single registered display manager channel, or null if none connected.
+  std::shared_ptr<DisplayManager> display_manager_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_MANAGER_SERVICE_H_
diff --git a/libs/vr/libvrflinger/display_service.cpp b/libs/vr/libvrflinger/display_service.cpp
new file mode 100644
index 0000000..c464c98
--- /dev/null
+++ b/libs/vr/libvrflinger/display_service.cpp
@@ -0,0 +1,332 @@
+#include "display_service.h"
+
+#include <vector>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <pdx/rpc/remote_method.h>
+#include <private/dvr/composite_hmd.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/lucid_metrics.h>
+#include <private/dvr/numeric.h>
+#include <private/dvr/polynomial_radial_distortion.h>
+#include <private/dvr/types.h>
+
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::WrapBuffer;
+
+namespace android {
+namespace dvr {
+
+// Default construction delegates with a null HIDL composer pointer.
+DisplayService::DisplayService() : DisplayService(nullptr) {}
+
+// Registers the service endpoint at DisplayRPC::kClientPath and hands the
+// (possibly null) HIDL composer interface to the HardwareComposer member.
+DisplayService::DisplayService(Hwc2::Composer* hidl)
+    : BASE("DisplayService", Endpoint::Create(DisplayRPC::kClientPath)),
+      hardware_composer_(hidl) {}
+
+// Returns a textual snapshot of the hardware composer state for dumpsys.
+// |max_length| bounds the number of bytes the HWC may write.
+std::string DisplayService::DumpState(size_t max_length) {
+  // Allocate one extra zero byte so the buffer is always NUL-terminated,
+  // even when Dump() fills the entire |max_length| region. The previous code
+  // could read past the end of the vector in that case.
+  std::vector<char> buffer(max_length + 1, '\0');
+  uint32_t max_len_p = static_cast<uint32_t>(max_length);
+  hardware_composer_.Dump(buffer.data(), &max_len_p);
+  return std::string(buffer.data());
+}
+
+// Handles channel teardown. If the channel hosted a normal display surface,
+// the surface is hidden (manager- and client-side visibility cleared) and the
+// display manager is notified so it can re-evaluate the surface list.
+void DisplayService::OnChannelClose(pdx::Message& /*message*/,
+                                    const std::shared_ptr<Channel>& channel) {
+  auto surface = std::static_pointer_cast<SurfaceChannel>(channel);
+  if (surface && surface->type() == SurfaceTypeEnum::Normal) {
+    auto display_surface = std::static_pointer_cast<DisplaySurface>(surface);
+    display_surface->ManagerSetVisible(false);
+    display_surface->ClientSetVisible(false);
+    NotifyDisplayConfigurationUpdate();
+  }
+  // TODO(jwcai) Handle ChannelClose of VideoMeshSurface.
+}
+
+// First-level dispatch for display service messages. Directly handles messages
+// that are independent of the display surface (metrics, creation) and routes
+// surface-specific messages to the per-instance handlers.
+int DisplayService::HandleMessage(pdx::Message& message) {
+  // Note: the surface-specific opcodes below resolve their channel inside
+  // HandleSurfaceMessage; no channel lookup is needed at this level.
+  switch (message.GetOp()) {
+    case DisplayRPC::GetMetrics::Opcode:
+      DispatchRemoteMethod<DisplayRPC::GetMetrics>(
+          *this, &DisplayService::OnGetMetrics, message);
+      return 0;
+
+    case DisplayRPC::GetEdsCapture::Opcode:
+      DispatchRemoteMethod<DisplayRPC::GetEdsCapture>(
+          *this, &DisplayService::OnGetEdsCapture, message);
+      return 0;
+
+    case DisplayRPC::CreateSurface::Opcode:
+      DispatchRemoteMethod<DisplayRPC::CreateSurface>(
+          *this, &DisplayService::OnCreateSurface, message);
+      return 0;
+
+    case DisplayRPC::EnterVrMode::Opcode:
+      DispatchRemoteMethod<DisplayRPC::EnterVrMode>(
+          *this, &DisplayService::OnEnterVrMode, message);
+      return 0;
+
+    case DisplayRPC::ExitVrMode::Opcode:
+      DispatchRemoteMethod<DisplayRPC::ExitVrMode>(
+          *this, &DisplayService::OnExitVrMode, message);
+      return 0;
+
+    case DisplayRPC::SetViewerParams::Opcode:
+      DispatchRemoteMethod<DisplayRPC::SetViewerParams>(
+          *this, &DisplayService::OnSetViewerParams, message);
+      return 0;
+
+    // Direct the surface specific messages to the surface instance.
+    case DisplayRPC::AllocateBuffer::Opcode:
+    case DisplayRPC::SetAttributes::Opcode:
+    case DisplayRPC::GetMetadataBuffer::Opcode:
+    case DisplayRPC::CreateVideoMeshSurface::Opcode:
+    case DisplayRPC::VideoMeshSurfaceCreateProducerQueue::Opcode:
+      return HandleSurfaceMessage(message);
+
+    default:
+      return Service::HandleMessage(message);
+  }
+}
+
+// Computes and returns the system display metrics: native display geometry
+// and DPI, the recommended distorted render target size, the vsync period,
+// and the per-eye fields of view derived from the head mount metrics.
+// Replies with EINVAL when no compositor is available.
+SystemDisplayMetrics DisplayService::OnGetMetrics(pdx::Message& message) {
+  const Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+
+  HeadMountMetrics head_mount = compositor->head_mount_metrics();
+  CompositeHmd hmd(head_mount, hardware_composer_.GetHmdDisplayMetrics());
+  vec2i distorted_render_size = hmd.GetRecommendedRenderTargetSize();
+  FieldOfView left_fov = hmd.GetEyeFov(kLeftEye);
+  FieldOfView right_fov = hmd.GetEyeFov(kRightEye);
+
+  // Fetch the (by-value) display metrics once instead of once per field.
+  const auto display_metrics = GetDisplayMetrics();
+
+  SystemDisplayMetrics metrics;
+
+  metrics.display_native_width = display_metrics.width;
+  metrics.display_native_height = display_metrics.height;
+  metrics.display_x_dpi = display_metrics.dpi.x;
+  metrics.display_y_dpi = display_metrics.dpi.y;
+  metrics.distorted_width = distorted_render_size[0];
+  metrics.distorted_height = distorted_render_size[1];
+  metrics.vsync_period_ns =
+      hardware_composer_.native_display_metrics().vsync_period_ns;
+  metrics.hmd_ipd_mm = 0;
+  metrics.inter_lens_distance_m = head_mount.GetInterLensDistance();
+  metrics.left_fov_lrbt[0] = left_fov.GetLeft();
+  metrics.left_fov_lrbt[1] = left_fov.GetRight();
+  metrics.left_fov_lrbt[2] = left_fov.GetBottom();
+  metrics.left_fov_lrbt[3] = left_fov.GetTop();
+  metrics.right_fov_lrbt[0] = right_fov.GetLeft();
+  metrics.right_fov_lrbt[1] = right_fov.GetRight();
+  metrics.right_fov_lrbt[2] = right_fov.GetBottom();
+  metrics.right_fov_lrbt[3] = right_fov.GetTop();
+
+  return metrics;
+}
+
+// Creates a new DisplaySurface and associates it with this channel. This may
+// only be done once per channel. Returns 0 on success, -EINVAL if the channel
+// already has a surface attached.
+int DisplayService::OnCreateSurface(pdx::Message& message, int width,
+                                    int height, int format, int usage,
+                                    DisplaySurfaceFlags flags) {
+  // A surface may only be created once per channel.
+  if (message.GetChannel())
+    return -EINVAL;
+
+  ALOGI_IF(TRACE, "DisplayService::OnCreateSurface: cid=%d",
+           message.GetChannelId());
+
+  // Use the channel id as the unique surface id.
+  const int surface_id = message.GetChannelId();
+  const int process_id = message.GetProcessId();
+
+  ALOGI_IF(TRACE,
+           "DisplayService::OnCreateSurface: surface_id=%d process_id=%d "
+           "width=%d height=%d format=%x usage=%x flags=%x",
+           surface_id, process_id, width, height, format, usage, flags);
+
+  // TODO(eieio,jbates): Validate request parameters.
+  auto channel = std::make_shared<DisplaySurface>(
+      this, surface_id, process_id, width, height, format, usage, flags);
+
+  // Attach the surface to the channel and tell the display manager a new
+  // surface exists.
+  message.SetChannel(channel);
+  NotifyDisplayConfigurationUpdate();
+  return 0;
+}
+
+// Returns the most recent EDS late-latch output as a raw byte buffer.
+// Replies with EINVAL when no compositor is available and EPERM when no EDS
+// pose can be retrieved.
+DisplayRPC::ByteBuffer DisplayService::OnGetEdsCapture(pdx::Message& message) {
+  Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+
+  // The buffer is sized to hold exactly one LateLatchOutput record.
+  std::vector<std::uint8_t> buffer(sizeof(LateLatchOutput));
+
+  if (!compositor->GetLastEdsPose(
+          reinterpret_cast<LateLatchOutput*>(buffer.data()))) {
+    REPLY_ERROR_RETURN(message, EPERM, {});
+  }
+
+  return WrapBuffer(std::move(buffer));
+}
+
+// Resumes the hardware composer when a client enters VR mode.
+int DisplayService::OnEnterVrMode(pdx::Message& /*message*/) {
+  hardware_composer_.Resume();
+  return 0;
+}
+
+// Suspends the hardware composer when a client exits VR mode.
+int DisplayService::OnExitVrMode(pdx::Message& /*message*/) {
+  hardware_composer_.Suspend();
+  return 0;
+}
+
+// Builds a HeadMountMetrics from client-supplied viewer parameters and pushes
+// it to the compositor. Defaults to a symmetric 55-degree FOV per eye when
+// fewer than four left-eye angles are supplied. Replies with EINVAL when no
+// compositor is available.
+void DisplayService::OnSetViewerParams(pdx::Message& message,
+                                       const ViewerParams& view_params) {
+  Compositor* compositor = hardware_composer_.GetCompositor();
+  if (compositor == nullptr)
+    REPLY_ERROR_RETURN(message, EINVAL);
+
+  FieldOfView left(55.0f, 55.0f, 55.0f, 55.0f);
+  FieldOfView right(55.0f, 55.0f, 55.0f, 55.0f);
+  if (view_params.left_eye_field_of_view_angles.size() >= 4) {
+    left = FieldOfView(ToRad(view_params.left_eye_field_of_view_angles[0]),
+                       ToRad(view_params.left_eye_field_of_view_angles[1]),
+                       ToRad(view_params.left_eye_field_of_view_angles[2]),
+                       ToRad(view_params.left_eye_field_of_view_angles[3]));
+    // NOTE(review): the right-eye FOV is derived by mirroring the left-eye
+    // angles (outer/inner swapped) rather than reading separate right-eye
+    // values — presumably the viewer params carry only left-eye angles;
+    // confirm this is intentional.
+    right = FieldOfView(ToRad(view_params.left_eye_field_of_view_angles[1]),
+                        ToRad(view_params.left_eye_field_of_view_angles[0]),
+                        ToRad(view_params.left_eye_field_of_view_angles[2]),
+                        ToRad(view_params.left_eye_field_of_view_angles[3]));
+  }
+
+  std::shared_ptr<ColorChannelDistortion> red_distortion;
+  std::shared_ptr<ColorChannelDistortion> green_distortion;
+  std::shared_ptr<ColorChannelDistortion> blue_distortion;
+
+  // We should always have a red distortion.
+  LOG_FATAL_IF(view_params.distortion_coefficients_r.empty());
+  red_distortion = std::make_shared<PolynomialRadialDistortion>(
+      view_params.distortion_coefficients_r);
+
+  // Green and blue distortions are optional; a null pointer is passed through
+  // when the corresponding coefficient list is empty.
+  if (!view_params.distortion_coefficients_g.empty()) {
+    green_distortion = std::make_shared<PolynomialRadialDistortion>(
+        view_params.distortion_coefficients_g);
+  }
+
+  if (!view_params.distortion_coefficients_b.empty()) {
+    blue_distortion = std::make_shared<PolynomialRadialDistortion>(
+        view_params.distortion_coefficients_b);
+  }
+
+  HeadMountMetrics::EyeOrientation left_orientation =
+      HeadMountMetrics::EyeOrientation::kCCW0Degrees;
+  HeadMountMetrics::EyeOrientation right_orientation =
+      HeadMountMetrics::EyeOrientation::kCCW0Degrees;
+
+  // Eye orientations are only applied when both eyes are specified.
+  if (view_params.eye_orientations.size() > 1) {
+    left_orientation = static_cast<HeadMountMetrics::EyeOrientation>(
+        view_params.eye_orientations[0]);
+    right_orientation = static_cast<HeadMountMetrics::EyeOrientation>(
+        view_params.eye_orientations[1]);
+  }
+
+  HeadMountMetrics head_mount_metrics(
+      view_params.inter_lens_distance, view_params.tray_to_lens_distance,
+      view_params.screen_to_lens_distance,
+      static_cast<HeadMountMetrics::VerticalAlignment>(
+          view_params.vertical_alignment),
+      left, right, red_distortion, green_distortion, blue_distortion,
+      left_orientation, right_orientation,
+      view_params.screen_center_to_lens_distance);
+
+  compositor->UpdateHeadMountMetrics(head_mount_metrics);
+}
+
+// Calls the message handler for the DisplaySurface associated with this
+// channel. Replies with EINVAL when no surface has been created on the
+// channel yet (i.e. CreateSurface was never handled).
+int DisplayService::HandleSurfaceMessage(pdx::Message& message) {
+  auto surface = std::static_pointer_cast<SurfaceChannel>(message.GetChannel());
+  ALOGW_IF(!surface,
+           "DisplayService::HandleSurfaceMessage: surface is nullptr!");
+
+  if (surface)
+    return surface->HandleMessage(message);
+  else
+    REPLY_ERROR_RETURN(message, EINVAL, 0);
+}
+
+// Looks up a display surface by id; surface ids are channel ids, so this is a
+// direct channel lookup. Returns null if no such channel exists.
+std::shared_ptr<DisplaySurface> DisplayService::GetDisplaySurface(
+    int surface_id) const {
+  return std::static_pointer_cast<DisplaySurface>(GetChannel(surface_id));
+}
+
+// Returns all display surfaces currently attached to service channels.
+std::vector<std::shared_ptr<DisplaySurface>>
+DisplayService::GetDisplaySurfaces() const {
+  return GetChannels<DisplaySurface>();
+}
+
+// Returns the subset of display surfaces that report IsVisible().
+std::vector<std::shared_ptr<DisplaySurface>>
+DisplayService::GetVisibleDisplaySurfaces() const {
+  std::vector<std::shared_ptr<DisplaySurface>> visible_surfaces;
+
+  ForEachDisplaySurface(
+      [&](const std::shared_ptr<DisplaySurface>& surface) mutable {
+        if (surface->IsVisible())
+          visible_surfaces.push_back(surface);
+      });
+
+  return visible_surfaces;
+}
+
+// Re-sorts the visible surfaces by (manager z-order, client z-order), assigns
+// blur values, and hands the resulting list to the hardware composer.
+// Iteration for blur runs from top-most to bottom-most surface: a surface
+// with blur_behind requests blur on the surfaces beneath it, and surfaces
+// marked exclude_from_blur are never blurred.
+int DisplayService::UpdateActiveDisplaySurfaces() {
+  auto visible_surfaces = GetVisibleDisplaySurfaces();
+
+  // Sort the surfaces based on manager z order first, then client z order.
+  std::sort(visible_surfaces.begin(), visible_surfaces.end(),
+            [](const std::shared_ptr<DisplaySurface>& a,
+               const std::shared_ptr<DisplaySurface>& b) {
+              return a->manager_z_order() != b->manager_z_order()
+                         ? a->manager_z_order() < b->manager_z_order()
+                         : a->client_z_order() < b->client_z_order();
+            });
+
+  // %zu is the correct printf conversion for size_t; %zd is for signed
+  // ssize_t and is undefined for an unsigned argument.
+  ALOGD_IF(TRACE,
+           "DisplayService::UpdateActiveDisplaySurfaces: %zu visible surfaces",
+           visible_surfaces.size());
+
+  // TODO(jbates) Have the shell manage blurred layers.
+  bool blur_requested = false;
+  auto end = visible_surfaces.crend();
+  for (auto it = visible_surfaces.crbegin(); it != end; ++it) {
+    // Bind by reference to avoid a shared_ptr copy per iteration.
+    const auto& surface = *it;
+    // Surfaces with exclude_from_blur==true are not blurred
+    // and are excluded from blur computation of other layers.
+    if (surface->client_exclude_from_blur()) {
+      surface->ManagerSetBlur(0.0f);
+      continue;
+    }
+    surface->ManagerSetBlur(blur_requested ? 1.0f : 0.0f);
+    if (surface->client_blur_behind())
+      blur_requested = true;
+  }
+  return hardware_composer_.SetDisplaySurfaces(std::move(visible_surfaces));
+}
+
+// Installs the callback invoked whenever the surface configuration changes;
+// the display manager service uses this to learn about surface updates.
+void DisplayService::SetDisplayConfigurationUpdateNotifier(
+    DisplayConfigurationUpdateNotifier update_notifier) {
+  update_notifier_ = update_notifier;
+}
+
+// Invokes the configuration-update callback, if one has been installed.
+void DisplayService::NotifyDisplayConfigurationUpdate() {
+  if (update_notifier_)
+    update_notifier_();
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_service.h b/libs/vr/libvrflinger/display_service.h
new file mode 100644
index 0000000..ebd97de
--- /dev/null
+++ b/libs/vr/libvrflinger/display_service.h
@@ -0,0 +1,107 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
+
+#include <pdx/service.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/late_latch.h>
+
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "acquired_buffer.h"
+#include "display_surface.h"
+#include "epoll_event_dispatcher.h"
+#include "hardware_composer.h"
+
+namespace android {
+namespace dvr {
+
+// DisplayService implements the displayd display service over ServiceFS.
+class DisplayService : public pdx::ServiceBase<DisplayService> {
+ public:
+  // pdx::Service overrides: dumpsys text, channel close, message dispatch.
+  std::string DumpState(size_t max_length) override;
+
+  void OnChannelClose(pdx::Message& message,
+                      const std::shared_ptr<pdx::Channel>& channel) override;
+  int HandleMessage(pdx::Message& message) override;
+
+  // Surface lookup helpers; surfaces are stored as service channels, with
+  // the channel id doubling as the surface id.
+  std::shared_ptr<DisplaySurface> GetDisplaySurface(int surface_id) const;
+  std::vector<std::shared_ptr<DisplaySurface>> GetDisplaySurfaces() const;
+  std::vector<std::shared_ptr<DisplaySurface>> GetVisibleDisplaySurfaces()
+      const;
+
+  // Updates the list of actively displayed surfaces. This must be called after
+  // any change to client/manager attributes that affect visibility or z order.
+  int UpdateActiveDisplaySurfaces();
+
+  // Applies |action| to every channel of type SurfaceTypeEnum::Normal, i.e.
+  // every DisplaySurface (other surface channel types are skipped).
+  template <class A>
+  void ForEachDisplaySurface(A action) const {
+    ForEachChannel([action](const ChannelIterator::value_type& pair) mutable {
+      auto surface = std::static_pointer_cast<SurfaceChannel>(pair.second);
+      if (surface->type() == SurfaceTypeEnum::Normal)
+        action(std::static_pointer_cast<DisplaySurface>(surface));
+    });
+  }
+
+  using DisplayConfigurationUpdateNotifier = std::function<void(void)>;
+  void SetDisplayConfigurationUpdateNotifier(
+      DisplayConfigurationUpdateNotifier notifier);
+
+  // Forwards the vsync callback to the hardware composer.
+  using VSyncCallback = HardwareComposer::VSyncCallback;
+  void SetVSyncCallback(VSyncCallback callback) {
+    hardware_composer_.SetVSyncCallback(callback);
+  }
+
+  HWCDisplayMetrics GetDisplayMetrics() {
+    return hardware_composer_.display_metrics();
+  }
+
+  // Resumes (activated) or suspends the hardware composer.
+  void SetActive(bool activated) {
+    if (activated) {
+      hardware_composer_.Resume();
+    } else {
+      hardware_composer_.Suspend();
+    }
+  }
+
+ private:
+  friend BASE;
+  friend DisplaySurface;
+
+  friend class VrDisplayStateService;
+
+  DisplayService();
+  // NOTE(review): single-argument constructor is not explicit; consider
+  // marking it explicit to avoid implicit conversions from Composer*.
+  DisplayService(android::Hwc2::Composer* hidl);
+
+  // RPC handlers; see display_service.cpp for behavior details.
+  SystemDisplayMetrics OnGetMetrics(pdx::Message& message);
+  int OnCreateSurface(pdx::Message& message, int width, int height,
+                      int format, int usage, DisplaySurfaceFlags flags);
+
+  DisplayRPC::ByteBuffer OnGetEdsCapture(pdx::Message& message);
+
+  int OnEnterVrMode(pdx::Message& message);
+  int OnExitVrMode(pdx::Message& message);
+  void OnSetViewerParams(pdx::Message& message, const ViewerParams& view_params);
+
+  // Called by DisplaySurface to signal that a surface property has changed and
+  // the display manager should be notified.
+  void NotifyDisplayConfigurationUpdate();
+
+  int HandleSurfaceMessage(pdx::Message& message);
+
+  DisplayService(const DisplayService&) = delete;
+  void operator=(const DisplayService&) = delete;
+
+  // Epoll dispatcher used by DisplaySurface to watch buffer consumer events.
+  EpollEventDispatcher dispatcher_;
+  HardwareComposer hardware_composer_;
+  DisplayConfigurationUpdateNotifier update_notifier_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SERVICE_H_
diff --git a/libs/vr/libvrflinger/display_surface.cpp b/libs/vr/libvrflinger/display_surface.cpp
new file mode 100644
index 0000000..dff08b5
--- /dev/null
+++ b/libs/vr/libvrflinger/display_surface.cpp
@@ -0,0 +1,435 @@
+#include "display_surface.h"
+
+#include <utils/Trace.h>
+
+#include <private/dvr/platform_defines.h>
+
+#include "display_service.h"
+#include "hardware_composer.h"
+
+#define LOCAL_TRACE 1
+
+using android::pdx::BorrowedChannelHandle;
+using android::pdx::LocalChannelHandle;
+using android::pdx::Message;
+using android::pdx::RemoteChannelHandle;
+using android::pdx::Status;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::IfAnyOf;
+
+namespace android {
+namespace dvr {
+
+// Constructs a surface channel of type Normal backed by a
+// DisplaySurfaceMetadata region. Client and manager attributes start hidden
+// with z-order 0 and no blur; buffer indices start at 0.
+DisplaySurface::DisplaySurface(DisplayService* service, int surface_id,
+                               int process_id, int width, int height,
+                               int format, int usage, int flags)
+    : SurfaceChannel(service, surface_id, SurfaceTypeEnum::Normal,
+                     sizeof(DisplaySurfaceMetadata)),
+      process_id_(process_id),
+      posted_buffers_(kMaxPostedBuffers),
+      video_mesh_surfaces_updated_(false),
+      width_(width),
+      height_(height),
+      format_(format),
+      usage_(usage),
+      flags_(flags),
+      client_visible_(false),
+      client_z_order_(0),
+      client_exclude_from_blur_(false),
+      client_blur_behind_(false),
+      manager_visible_(false),
+      manager_z_order_(0),
+      manager_blur_(0.0f),
+      allocated_buffer_index_(0),
+      layer_order_(0) {}
+
+DisplaySurface::~DisplaySurface() {
+  ALOGD_IF(LOCAL_TRACE,
+           "DisplaySurface::~DisplaySurface: surface_id=%d process_id=%d",
+           surface_id(), process_id_);
+}
+
+// Manager- and client-side attribute setters. Each setter acquires |lock_| so
+// an update is atomic with respect to readers holding the same mutex.
+void DisplaySurface::ManagerSetVisible(bool visible) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  manager_visible_ = visible;
+}
+
+void DisplaySurface::ManagerSetZOrder(int z_order) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  manager_z_order_ = z_order;
+}
+
+void DisplaySurface::ManagerSetBlur(float blur) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  manager_blur_ = blur;
+}
+
+void DisplaySurface::ClientSetVisible(bool visible) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  client_visible_ = visible;
+}
+
+void DisplaySurface::ClientSetZOrder(int z_order) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  client_z_order_ = z_order;
+}
+
+void DisplaySurface::ClientSetExcludeFromBlur(bool exclude_from_blur) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  client_exclude_from_blur_ = exclude_from_blur;
+}
+
+void DisplaySurface::ClientSetBlurBehind(bool blur_behind) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  client_blur_behind_ = blur_behind;
+}
+
+// Returns the number of buffer consumers currently attached to this surface.
+size_t DisplaySurface::GetBufferCount() const {
+  std::lock_guard<std::mutex> autolock(lock_);
+  return buffers_.size();
+}
+
+// Returns a snapshot of the buffer consumers attached to this surface.
+std::vector<std::shared_ptr<BufferConsumer>> DisplaySurface::GetBuffers() {
+  std::lock_guard<std::mutex> autolock(lock_);
+  // Reserve rather than size-construct: the previous code created size()
+  // default (null) entries and then appended, returning a vector of 2N
+  // elements whose first half was null pointers.
+  std::vector<std::shared_ptr<BufferConsumer>> return_vector;
+  return_vector.reserve(buffers_.size());
+
+  // Iterate by const reference to avoid copying each map pair (and the
+  // shared_ptr refcount traffic that implies).
+  for (const auto& pair : buffers_) {
+    return_vector.push_back(pair.second);
+  }
+
+  return return_vector;
+}
+
+// Acquires the most recent available posted buffer, skipping at most one
+// older available buffer (captured into |skipped_buffer| when provided) so an
+// application that missed a frame can catch up. Returns an empty
+// AcquiredBuffer when nothing is available.
+AcquiredBuffer DisplaySurface::AcquireNewestAvailableBuffer(
+    AcquiredBuffer* skipped_buffer) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  AcquiredBuffer buffer;
+  int frames = 0;
+  // Basic latency stopgap for when the application misses a frame:
+  // If the application recovers on the 2nd or 3rd (etc) frame after
+  // missing, this code will skip frames to catch up by checking if
+  // the next frame is also available.
+  while (!posted_buffers_.IsEmpty() && posted_buffers_.Front().IsAvailable()) {
+    // Capture the skipped buffer into the result parameter.
+    // Note that this API only supports skipping one buffer per vsync.
+    if (frames > 0 && skipped_buffer)
+      *skipped_buffer = std::move(buffer);
+    ++frames;
+    buffer = std::move(posted_buffers_.Front());
+    posted_buffers_.PopFront();
+    // Stop after consuming two entries: one skipped, one acquired.
+    if (frames == 2)
+      break;
+  }
+  return buffer;
+}
+
+// Returns true when the oldest posted buffer is available for acquisition.
+bool DisplaySurface::IsBufferAvailable() const {
+  std::lock_guard<std::mutex> autolock(lock_);
+  return !posted_buffers_.IsEmpty() && posted_buffers_.Front().IsAvailable();
+}
+
+// Returns true when at least one buffer has been posted, available or not.
+bool DisplaySurface::IsBufferPosted() const {
+  std::lock_guard<std::mutex> autolock(lock_);
+  return !posted_buffers_.IsEmpty();
+}
+
+// Acquires the oldest posted buffer. Logs and returns an empty
+// AcquiredBuffer when nothing is posted.
+AcquiredBuffer DisplaySurface::AcquireCurrentBuffer() {
+  std::lock_guard<std::mutex> autolock(lock_);
+  if (posted_buffers_.IsEmpty()) {
+    ALOGE("Error: attempt to acquire buffer when none are posted.");
+    return AcquiredBuffer();
+  }
+  AcquiredBuffer buffer = std::move(posted_buffers_.Front());
+  posted_buffers_.PopFront();
+  return buffer;
+}
+
+// Creates one new consumer channel handle per attached buffer and returns
+// them through |consumers|. Returns 0 on success or a negative errno when any
+// consumer channel cannot be created; on failure |consumers| is not modified.
+int DisplaySurface::GetConsumers(std::vector<LocalChannelHandle>* consumers) {
+  std::lock_guard<std::mutex> autolock(lock_);
+  std::vector<LocalChannelHandle> items;
+  items.reserve(buffers_.size());
+
+  // Iterate by const reference: copying the map pair would copy the
+  // shared_ptr and incur needless refcount traffic.
+  for (const auto& pair : buffers_) {
+    const auto& buffer = pair.second;
+
+    Status<LocalChannelHandle> consumer_channel = buffer->CreateConsumer();
+    if (!consumer_channel) {
+      ALOGE(
+          "DisplaySurface::GetConsumers: Failed to get a new consumer for "
+          "buffer %d: %s",
+          buffer->id(), consumer_channel.GetErrorMessage().c_str());
+      return -consumer_channel.error();
+    }
+
+    items.push_back(consumer_channel.take());
+  }
+
+  *consumers = std::move(items);
+  return 0;
+}
+
+// Second-level dispatch for surface-scoped messages routed here by
+// DisplayService::HandleSurfaceMessage. Opcodes not handled in this switch
+// (e.g. GetMetadataBuffer) fall through to SurfaceChannel::HandleMessage.
+int DisplaySurface::HandleMessage(pdx::Message& message) {
+  switch (message.GetOp()) {
+    case DisplayRPC::SetAttributes::Opcode:
+      DispatchRemoteMethod<DisplayRPC::SetAttributes>(
+          *this, &DisplaySurface::OnClientSetAttributes, message);
+      break;
+
+    case DisplayRPC::AllocateBuffer::Opcode:
+      DispatchRemoteMethod<DisplayRPC::AllocateBuffer>(
+          *this, &DisplaySurface::OnAllocateBuffer, message);
+      break;
+
+    case DisplayRPC::CreateVideoMeshSurface::Opcode:
+      DispatchRemoteMethod<DisplayRPC::CreateVideoMeshSurface>(
+          *this, &DisplaySurface::OnCreateVideoMeshSurface, message);
+      break;
+
+    default:
+      return SurfaceChannel::HandleMessage(message);
+  }
+
+  return 0;
+}
+
+// Applies a batch of client attribute updates (z-order, visibility, blur
+// flags), then notifies the display service so the configuration can be
+// re-evaluated. Unknown keys and type-mismatched values are logged and
+// skipped; the method still returns 0.
+int DisplaySurface::OnClientSetAttributes(
+    pdx::Message& /*message*/, const DisplaySurfaceAttributes& attributes) {
+  for (const auto& attribute : attributes) {
+    const auto& key = attribute.first;
+    const auto* variant = &attribute.second;
+    bool invalid_value = false;
+    // IfAnyOf<...>::Call returns false when the variant holds none of the
+    // listed types, which flags the value as invalid below.
+    switch (key) {
+      case DisplaySurfaceAttributeEnum::ZOrder:
+        invalid_value = !IfAnyOf<int32_t, int64_t, float>::Call(
+            variant, [this](const auto& value) {
+              DisplaySurface::ClientSetZOrder(value);
+            });
+        break;
+      case DisplaySurfaceAttributeEnum::Visible:
+        invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+            variant, [this](const auto& value) {
+              DisplaySurface::ClientSetVisible(value);
+            });
+        break;
+      case DisplaySurfaceAttributeEnum::ExcludeFromBlur:
+        invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+            variant, [this](const auto& value) {
+              DisplaySurface::ClientSetExcludeFromBlur(value);
+            });
+        break;
+      case DisplaySurfaceAttributeEnum::BlurBehind:
+        invalid_value = !IfAnyOf<int32_t, int64_t, bool>::Call(
+            variant, [this](const auto& value) {
+              DisplaySurface::ClientSetBlurBehind(value);
+            });
+        break;
+      default:
+        ALOGW(
+            "DisplaySurface::OnClientSetAttributes: Unrecognized attribute %d "
+            "surface_id=%d",
+            key, surface_id());
+        break;
+    }
+
+    if (invalid_value) {
+      ALOGW(
+          "DisplaySurface::OnClientSetAttributes: Failed to set display "
+          "surface attribute '%s' because of incompatible type: %d",
+          DisplaySurfaceAttributeEnum::ToString(key).c_str(), variant->index());
+    }
+  }
+
+  service()->NotifyDisplayConfigurationUpdate();
+  return 0;
+}
+
+// Allocates a new buffer for the DisplaySurface associated with this channel.
+// Returns the allocated buffer's index together with the producer channel
+// handle for the client. A matching consumer is imported and attached to this
+// surface so the service can read posted frames.
+std::pair<uint32_t, LocalChannelHandle> DisplaySurface::OnAllocateBuffer(
+    pdx::Message& message) {
+  // Inject flag to enable framebuffer compression for the application buffers.
+  // TODO(eieio,jbates): Make this configurable per hardware platform.
+  const int usage = usage_ | GRALLOC_USAGE_QCOM_FRAMEBUFFER_COMPRESSION;
+  // SeparateGeometry surfaces carry two slices per buffer.
+  const int slice_count =
+      (flags_ & static_cast<int>(DisplaySurfaceFlagsEnum::SeparateGeometry))
+          ? 2
+          : 1;
+
+  ALOGI_IF(
+      TRACE,
+      "DisplaySurface::OnAllocateBuffer: width=%d height=%d format=%x usage=%x "
+      "slice_count=%d",
+      width_, height_, format_, usage, slice_count);
+
+  // Create a producer buffer to hand back to the sender.
+  auto producer = BufferProducer::Create(width_, height_, format_, usage,
+                                         sizeof(uint64_t), slice_count);
+  if (!producer)
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+
+  // Create and import a consumer attached to the producer.
+  Status<LocalChannelHandle> consumer_channel = producer->CreateConsumer();
+  if (!consumer_channel)
+    REPLY_ERROR_RETURN(message, consumer_channel.error(), {});
+
+  std::shared_ptr<BufferConsumer> consumer =
+      BufferConsumer::Import(consumer_channel.take());
+  if (!consumer)
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+
+  // Add the consumer to this surface.
+  int err = AddConsumer(consumer);
+  if (err < 0) {
+    ALOGE("DisplaySurface::OnAllocateBuffer: failed to add consumer: buffer=%d",
+          consumer->id());
+    REPLY_ERROR_RETURN(message, -err, {});
+  }
+
+  // Move the channel handle so that it doesn't get closed when the producer
+  // goes out of scope.
+  std::pair<uint32_t, LocalChannelHandle> return_value(
+      allocated_buffer_index_, std::move(producer->GetChannelHandle()));
+
+  // Save buffer index, associated with the buffer id so that it can be looked
+  // up later.
+  buffer_id_to_index_[consumer->id()] = allocated_buffer_index_;
+  ++allocated_buffer_index_;
+
+  return return_value;
+}
+
+// Creates a VideoMeshSurface channel layered on top of this display surface
+// and queues it for pickup by the compositor (see GetVideoMeshSurfaces).
+// Fails with EINVAL when system distortion is disabled, since the video mesh
+// is composited through the distortion pass.
+RemoteChannelHandle DisplaySurface::OnCreateVideoMeshSurface(
+    pdx::Message& message) {
+  if (flags_ & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION) {
+    // Fixed typo in the log message: "distorion" -> "distortion".
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: system distortion is "
+        "disabled on this display surface, cannot create VideoMeshSurface on "
+        "top of it.");
+    REPLY_ERROR_RETURN(message, EINVAL, {});
+  }
+
+  int channel_id;
+  auto status = message.PushChannel(0, nullptr, &channel_id);
+
+  if (!status) {
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: failed to push channel: %s",
+        status.GetErrorMessage().c_str());
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+  }
+
+  auto surface = std::make_shared<VideoMeshSurface>(service(), channel_id);
+  const int ret = service()->SetChannel(channel_id, surface);
+  if (ret < 0) {
+    ALOGE(
+        "DisplaySurface::OnCreateVideoMeshSurface: failed to set new video "
+        "mesh surface channel: %s",
+        strerror(-ret));
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+  }
+
+  {
+    // Queue the new surface under the lock; the compositor drains this list.
+    std::lock_guard<std::mutex> autolock(lock_);
+    pending_video_mesh_surfaces_.push_back(surface);
+    video_mesh_surfaces_updated_ = true;
+  }
+
+  return status.take();
+}
+
+// Registers |consumer| with the service's epoll dispatcher and adds it to the
+// buffer map. Returns 0 on success or the dispatcher's error code (logged via
+// strerror(-err), i.e. expected to be a negative errno).
+int DisplaySurface::AddConsumer(
+    const std::shared_ptr<BufferConsumer>& consumer) {
+  ALOGD_IF(TRACE, "DisplaySurface::AddConsumer: buffer_id=%d", consumer->id());
+  // Add the consumer to the epoll dispatcher, edge-triggered.
+  int err = service()->dispatcher_.AddEventHandler(
+      consumer->event_fd(), EPOLLET | EPOLLIN | EPOLLHUP,
+      std::bind(&DisplaySurface::HandleConsumerEvents,
+                std::static_pointer_cast<DisplaySurface>(shared_from_this()),
+                consumer, std::placeholders::_1));
+  if (err) {
+    ALOGE(
+        "DisplaySurface::AddConsumer: failed to add epoll event handler for "
+        "consumer: %s",
+        strerror(-err));
+    return err;
+  }
+
+  // Add the consumer to the list of buffers for this surface.
+  std::lock_guard<std::mutex> autolock(lock_);
+  buffers_.insert(std::make_pair(consumer->id(), consumer));
+  return 0;
+}
+
+// Unregisters |consumer| from the epoll dispatcher and drops it from the
+// buffer map. This variant takes |lock_| itself.
+void DisplaySurface::RemoveConsumer(
+    const std::shared_ptr<BufferConsumer>& consumer) {
+  ALOGD_IF(TRACE, "DisplaySurface::RemoveConsumer: buffer_id=%d",
+           consumer->id());
+  service()->dispatcher_.RemoveEventHandler(consumer->event_fd());
+
+  std::lock_guard<std::mutex> autolock(lock_);
+  buffers_.erase(consumer->id());
+}
+
+// Same as RemoveConsumer but assumes the caller already holds |lock_|.
+void DisplaySurface::RemoveConsumerUnlocked(
+    const std::shared_ptr<BufferConsumer>& consumer) {
+  ALOGD_IF(TRACE, "DisplaySurface::RemoveConsumerUnlocked: buffer_id=%d",
+           consumer->id());
+  service()->dispatcher_.RemoveEventHandler(consumer->event_fd());
+  buffers_.erase(consumer->id());
+}
+
+// Records a newly posted buffer in the ring of posted buffers. When the ring
+// is full the back entry (the most recently appended, since Append adds at
+// the back) is popped to make room. If acquiring the buffer fails with EPIPE
+// the other end has closed, so the consumer is removed entirely.
+void DisplaySurface::OnPostConsumer(
+    const std::shared_ptr<BufferConsumer>& consumer) {
+  ATRACE_NAME("DisplaySurface::OnPostConsumer");
+  std::lock_guard<std::mutex> autolock(lock_);
+
+  if (posted_buffers_.IsFull()) {
+    ALOGE("Error: posted buffers full, overwriting");
+    posted_buffers_.PopBack();
+  }
+
+  int error;
+  posted_buffers_.Append(AcquiredBuffer(consumer, &error));
+
+  // Remove the consumer if the other end was closed.
+  if (posted_buffers_.Back().IsEmpty() && error == -EPIPE)
+    RemoveConsumerUnlocked(consumer);
+}
+
+// Epoll callback for a buffer consumer registered in AddConsumer. EPOLLHUP
+// means the other end of the buffer channel closed, so the consumer is
+// removed; EPOLLIN signals that ownership of a posted buffer has transferred
+// to the consumer side.
+void DisplaySurface::HandleConsumerEvents(
+    const std::shared_ptr<BufferConsumer>& consumer, int events) {
+  if (events & EPOLLHUP) {
+    ALOGD_IF(TRACE,
+             "DisplaySurface::HandleConsumerEvents: removing event handler for "
+             "buffer=%d",
+             consumer->id());
+    RemoveConsumer(consumer);
+  } else if (events & EPOLLIN) {
+    // BufferHub uses EPOLLIN to signal consumer ownership.
+    ALOGD_IF(TRACE,
+             "DisplaySurface::HandleConsumerEvents: posting buffer=%d for "
+             "process=%d",
+             consumer->id(), process_id_);
+
+    OnPostConsumer(consumer);
+  }
+}
+
+// Drains the pending video mesh surface list, promoting each weak_ptr that is
+// still alive, and clears the updated flag. Surfaces whose channel has
+// already gone away are logged and dropped.
+std::vector<std::shared_ptr<VideoMeshSurface>>
+DisplaySurface::GetVideoMeshSurfaces() {
+  std::lock_guard<std::mutex> autolock(lock_);
+  std::vector<std::shared_ptr<VideoMeshSurface>> surfaces;
+
+  for (auto& surface : pending_video_mesh_surfaces_) {
+    if (auto video_surface = surface.lock()) {
+      surfaces.push_back(video_surface);
+    } else {
+      ALOGE("Unable to lock video mesh surface.");
+    }
+  }
+
+  pending_video_mesh_surfaces_.clear();
+  video_mesh_surfaces_updated_ = false;
+  return surfaces;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/display_surface.h b/libs/vr/libvrflinger/display_surface.h
new file mode 100644
index 0000000..b7bcd97
--- /dev/null
+++ b/libs/vr/libvrflinger/display_surface.h
@@ -0,0 +1,211 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
+
+#include <pdx/file_handle.h>
+#include <pdx/service.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/ring_buffer.h>
+
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "acquired_buffer.h"
+#include "epoll_event_dispatcher.h"
+#include "surface_channel.h"
+#include "video_mesh_surface.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+// DisplaySurface is the service-side notion of a client display context. It is
+// responsible for managing display buffer format, geometry, and state, and
+// maintains the buffer consumers connected to the client.
+class DisplaySurface : public SurfaceChannel {
+ public:
+ DisplaySurface(DisplayService* service, int surface_id, int process_id,
+ int width, int height, int format, int usage, int flags);
+ ~DisplaySurface() override;
+
+ // Surface parameters fixed at construction time.
+ int process_id() const { return process_id_; }
+ int width() const { return width_; }
+ int height() const { return height_; }
+ int format() const { return format_; }
+ int usage() const { return usage_; }
+ int flags() const { return flags_; }
+
+ // Attributes controlled by the client (see ClientSet* below).
+ bool client_visible() const { return client_visible_; }
+ int client_z_order() const { return client_z_order_; }
+ bool client_exclude_from_blur() const { return client_exclude_from_blur_; }
+ bool client_blur_behind() const { return client_blur_behind_; }
+
+ // Attributes controlled by the display manager (see ManagerSet* below).
+ bool manager_visible() const { return manager_visible_; }
+ int manager_z_order() const { return manager_z_order_; }
+ float manager_blur() const { return manager_blur_; }
+
+ // True when video mesh surfaces were added since the last call to
+ // GetVideoMeshSurfaces().
+ bool video_mesh_surfaces_updated() const {
+ return video_mesh_surfaces_updated_;
+ }
+
+ // Maps the surface metadata blob buffer and returns a pointer to it, or
+ // nullptr when the buffer cannot be created. The mapping helpers are
+ // provided by the SurfaceChannel base class.
+ volatile const DisplaySurfaceMetadata* GetMetadataBufferPtr() {
+ if (EnsureMetadataBuffer()) {
+ void* addr = nullptr;
+ metadata_buffer_->GetBlobReadWritePointer(metadata_size(), &addr);
+ return static_cast<const volatile DisplaySurfaceMetadata*>(addr);
+ } else {
+ return nullptr;
+ }
+ }
+
+ // NOTE(review): operator[] default-inserts index 0 for a buffer id that was
+ // never allocated — confirm callers only pass ids returned by
+ // OnAllocateBuffer.
+ uint32_t GetRenderBufferIndex(int buffer_id) {
+ return buffer_id_to_index_[buffer_id];
+ }
+
+ size_t GetBufferCount() const;
+ std::vector<std::shared_ptr<BufferConsumer>> GetBuffers();
+
+ // Gets a new set of consumers for all of the surface's buffers. These
+ // consumers are independent from the consumers maintained internally to the
+ // surface and may be passed to other processes over IPC.
+ int GetConsumers(std::vector<pdx::LocalChannelHandle>* consumers);
+
+ // Invokes |action| on each (buffer id, consumer) entry while holding lock_.
+ template <class A>
+ void ForEachBuffer(A action) {
+ std::lock_guard<std::mutex> autolock(lock_);
+ std::for_each(buffers_.begin(), buffers_.end(), action);
+ }
+
+ bool IsBufferAvailable() const;
+ bool IsBufferPosted() const;
+ AcquiredBuffer AcquireCurrentBuffer();
+
+ // Get the newest buffer. Up to one buffer will be skipped. If a buffer is
+ // skipped, it will be stored in skipped_buffer if non null.
+ AcquiredBuffer AcquireNewestAvailableBuffer(AcquiredBuffer* skipped_buffer);
+
+ // Display manager interface to control visibility and z order.
+ void ManagerSetVisible(bool visible);
+ void ManagerSetZOrder(int z_order);
+ void ManagerSetBlur(float blur);
+
+ // A surface must be set visible by both the client and the display manager to
+ // be visible on screen.
+ bool IsVisible() const { return client_visible_ && manager_visible_; }
+
+ // A surface is blurred if the display manager requests it.
+ bool IsBlurred() const { return manager_blur_ > 0.0f; }
+
+ // Set by HardwareComposer to the current logical layer order of this surface.
+ void SetLayerOrder(int layer_order) { layer_order_ = layer_order; }
+ // Gets the unique z-order index of this surface among other visible surfaces.
+ // This is not the same as the hardware layer index, as not all display
+ // surfaces map directly to hardware layers. Lower layer orders should be
+ // composited underneath higher layer orders.
+ int layer_order() const { return layer_order_; }
+
+ // Lock all video mesh surfaces so that VideoMeshCompositor can access them.
+ std::vector<std::shared_ptr<VideoMeshSurface>> GetVideoMeshSurfaces();
+
+ private:
+ friend class DisplayService;
+
+ // The capacity of the pending buffer queue. Should be enough to hold all the
+ // buffers of this DisplaySurface, although in practice only 1 or 2 frames
+ // will be pending at a time.
+ static constexpr int kMaxPostedBuffers =
+ kSurfaceBufferMaxCount * kSurfaceViewMaxCount;
+
+ // Returns whether a frame is available without locking the mutex.
+ bool IsFrameAvailableNoLock() const;
+
+ // Handles epoll events for BufferHub consumers. Events are mainly generated
+ // by producers posting buffers ready for display. This handler runs on the
+ // epoll event thread.
+ void HandleConsumerEvents(const std::shared_ptr<BufferConsumer>& consumer,
+ int events);
+
+ // Dispatches display surface messages to the appropriate handlers. This
+ // handler runs on the displayd message dispatch thread.
+ int HandleMessage(pdx::Message& message) override;
+
+ // Sets display surface's client-controlled attributes.
+ int OnClientSetAttributes(pdx::Message& message,
+ const DisplaySurfaceAttributes& attributes);
+
+ // Allocates a buffer with the display surface geometry and settings and
+ // returns it to the client.
+ std::pair<uint32_t, pdx::LocalChannelHandle> OnAllocateBuffer(
+ pdx::Message& message);
+
+ // Creates a video mesh surface associated with this surface and returns it
+ // to the client.
+ pdx::RemoteChannelHandle OnCreateVideoMeshSurface(pdx::Message& message);
+
+ // Sets the current buffer for the display surface, discarding the previous
+ // buffer if it is not already claimed. Runs on the epoll event thread.
+ void OnPostConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Client interface (called through IPC) to set visibility and z order.
+ void ClientSetVisible(bool visible);
+ void ClientSetZOrder(int z_order);
+ void ClientSetExcludeFromBlur(bool exclude_from_blur);
+ void ClientSetBlurBehind(bool blur_behind);
+
+ // Runs on the displayd message dispatch thread.
+ int AddConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Runs on the epoll event thread.
+ void RemoveConsumer(const std::shared_ptr<BufferConsumer>& consumer);
+
+ // Runs on the epoll and display post thread.
+ void RemoveConsumerUnlocked(const std::shared_ptr<BufferConsumer>& consumer);
+
+ DisplaySurface(const DisplaySurface&) = delete;
+ void operator=(const DisplaySurface&) = delete;
+
+ int process_id_;
+
+ // Synchronizes access to mutable state below between message dispatch thread,
+ // epoll event thread, and frame post thread.
+ mutable std::mutex lock_;
+ std::unordered_map<int, std::shared_ptr<BufferConsumer>> buffers_;
+
+ // In a triple-buffered surface, up to kMaxPostedBuffers buffers may be
+ // posted and pending.
+ RingBuffer<AcquiredBuffer> posted_buffers_;
+
+ // Provides access to VideoMeshSurface. Here we don't want to increase
+ // the reference count immediately on allocation, will leave it into
+ // compositor's hand.
+ std::vector<std::weak_ptr<VideoMeshSurface>> pending_video_mesh_surfaces_;
+ // NOTE(review): volatile is not a thread-synchronization primitive; this
+ // flag appears to be written under lock_ — confirm all readers hold it too.
+ volatile bool video_mesh_surfaces_updated_;
+
+ // Surface parameters.
+ int width_;
+ int height_;
+ int format_;
+ int usage_;
+ int flags_;
+ bool client_visible_;
+ int client_z_order_;
+ bool client_exclude_from_blur_;
+ bool client_blur_behind_;
+ bool manager_visible_;
+ int manager_z_order_;
+ float manager_blur_;
+ // The monotonically increasing index for allocated buffers in this surface.
+ uint32_t allocated_buffer_index_;
+ int layer_order_;
+
+ // Maps from the buffer id to the corresponding allocated buffer index.
+ std::unordered_map<int, uint32_t> buffer_id_to_index_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_DISPLAY_SURFACE_H_
diff --git a/libs/vr/libvrflinger/epoll_event_dispatcher.cpp b/libs/vr/libvrflinger/epoll_event_dispatcher.cpp
new file mode 100644
index 0000000..b37e76e
--- /dev/null
+++ b/libs/vr/libvrflinger/epoll_event_dispatcher.cpp
@@ -0,0 +1,142 @@
+#include "epoll_event_dispatcher.h"
+
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/prctl.h>
+
+#include <dvr/performance_client_api.h>
+
+namespace android {
+namespace dvr {
+
+EpollEventDispatcher::EpollEventDispatcher()
+ : exit_thread_(false), epoll_fd_(-1), event_fd_(-1) {
+ epoll_fd_ = epoll_create(64);
+ if (epoll_fd_ < 0) {
+ ALOGE("Failed to create epoll fd: %s", strerror(errno));
+ return;
+ }
+
+ event_fd_ = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (event_fd_ < 0) {
+ ALOGE("Failed to create event for epolling: %s", strerror(errno));
+ return;
+ }
+
+ // Add watch for eventfd. This should only watch for EPOLLIN, which gets set
+ // when eventfd_write occurs. Use "this" as a unique sentinal value to
+ // identify events from the event fd.
+ epoll_event event = {.events = EPOLLIN, .data = {.ptr = this}};
+ if (epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, event_fd_, &event) < 0) {
+ ALOGE("Failed to add eventfd to epoll set because: %s", strerror(errno));
+ return;
+ }
+
+ thread_ = std::thread(&EpollEventDispatcher::EventThread, this);
+}
+
+EpollEventDispatcher::~EpollEventDispatcher() {
+ Stop();
+
+ close(epoll_fd_);
+ close(event_fd_);
+}
+
+void EpollEventDispatcher::Stop() {
+ exit_thread_.store(true);
+ eventfd_write(event_fd_, 1);
+}
+
+int EpollEventDispatcher::AddEventHandler(int fd, int event_mask,
+ Handler handler) {
+ std::lock_guard<std::mutex> lock(lock_);
+
+ epoll_event event;
+ event.events = event_mask;
+ event.data.ptr = &(handlers_[fd] = handler);
+
+ ALOGD_IF(
+ TRACE,
+ "EpollEventDispatcher::AddEventHandler: fd=%d event_mask=0x%x handler=%p",
+ fd, event_mask, event.data.ptr);
+
+ int err = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, fd, &event);
+ return err < 0 ? -errno : 0;
+}
+
+int EpollEventDispatcher::RemoveEventHandler(int fd) {
+ ALOGD_IF(TRACE, "EpollEventDispatcher::RemoveEventHandler: fd=%d", fd);
+ std::lock_guard<std::mutex> lock(lock_);
+
+ epoll_event dummy; // See BUGS in man 2 epoll_ctl.
+ if (epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, fd, &dummy) < 0) {
+ ALOGE("Failed to remove fd from epoll set because: %s", strerror(errno));
+ return -errno;
+ }
+
+ // If the fd was valid above, add it to the list of ids to remove.
+ removed_handlers_.push_back(fd);
+
+ // Wake up the event thread to clean up.
+ eventfd_write(event_fd_, 1);
+
+ return 0;
+}
+
+// Dispatch loop: blocks in epoll_wait() and invokes the registered handler
+// for each ready fd. Exits when Stop() sets exit_thread_ and wakes the loop.
+void EpollEventDispatcher::EventThread() {
+ prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("EpollEvent"), 0, 0, 0);
+
+ const int error = dvrSetSchedulerClass(0, "graphics");
+ LOG_ALWAYS_FATAL_IF(
+ error < 0,
+ "EpollEventDispatcher::EventThread: Failed to set scheduler class: %s",
+ strerror(-error));
+
+ const size_t kMaxNumEvents = 128;
+ epoll_event events[kMaxNumEvents];
+
+ while (!exit_thread_.load()) {
+ int num_events = epoll_wait(epoll_fd_, events, kMaxNumEvents, -1);
+ if (num_events < 0 && errno != EINTR)
+ break;
+
+ ALOGD_IF(TRACE, "EpollEventDispatcher::EventThread: num_events=%d",
+ num_events);
+
+ for (int i = 0; i < num_events; i++) {
+ ALOGD_IF(
+ TRACE,
+ "EpollEventDispatcher::EventThread: event %d: handler=%p events=0x%x",
+ i, events[i].data.ptr, events[i].events);
+
+ // "this" is the sentinel the constructor stored for the wake-up eventfd.
+ if (events[i].data.ptr == this) {
+ // Clear pending event on event_fd_. Serialize the read with respect to
+ // writes from other threads.
+ std::lock_guard<std::mutex> lock(lock_);
+ eventfd_t value;
+ eventfd_read(event_fd_, &value);
+ } else {
+ // data.ptr points at the Handler stored in handlers_; erasure is
+ // deferred to the cleanup pass below, so the pointer is still valid.
+ auto handler = reinterpret_cast<Handler*>(events[i].data.ptr);
+ if (handler)
+ (*handler)(events[i].events);
+ }
+ }
+
+ // Remove any handlers that have been posted for removal. This is done here
+ // instead of in RemoveEventHandler() to prevent races between the dispatch
+ // thread and the code requesting the removal. Handlers are guaranteed to
+ // stay alive between exiting epoll_wait() and the dispatch loop above.
+ std::lock_guard<std::mutex> lock(lock_);
+ for (auto handler_fd : removed_handlers_) {
+ ALOGD_IF(TRACE,
+ "EpollEventDispatcher::EventThread: removing handler: fd=%d",
+ handler_fd);
+ handlers_.erase(handler_fd);
+ }
+ removed_handlers_.clear();
+ }
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/epoll_event_dispatcher.h b/libs/vr/libvrflinger/epoll_event_dispatcher.h
new file mode 100644
index 0000000..43bca2e
--- /dev/null
+++ b/libs/vr/libvrflinger/epoll_event_dispatcher.h
@@ -0,0 +1,61 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
+
+#include <sys/epoll.h>
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace dvr {
+
+// Owns an epoll set and a dedicated thread that dispatches fd events to
+// registered handler callbacks.
+class EpollEventDispatcher {
+ public:
+ // Function type for event handlers. The handler receives a bitmask of the
+ // epoll events that occurred on the file descriptor associated with the
+ // handler.
+ using Handler = std::function<void(int)>;
+
+ // Creates the epoll set plus an internal wake-up eventfd and starts the
+ // dispatch thread.
+ EpollEventDispatcher();
+ ~EpollEventDispatcher();
+
+ // |handler| is called on the internal dispatch thread when |fd| is signaled
+ // by events in |event_mask|.
+ // Return 0 on success or a negative error code on failure.
+ int AddEventHandler(int fd, int event_mask, Handler handler);
+ int RemoveEventHandler(int fd);
+
+ // Asks the dispatch thread to exit and wakes it via the eventfd.
+ void Stop();
+
+ private:
+ void EventThread();
+
+ std::thread thread_;
+ std::atomic<bool> exit_thread_;
+
+ // Protects handlers_ and removed_handlers_ and serializes operations on
+ // epoll_fd_ and event_fd_.
+ std::mutex lock_;
+
+ // Maintains a map of fds to event handlers. This is primarily to keep any
+ // references alive that may be bound in the std::function instances. It is
+ // not used at dispatch time to avoid performance problems with different
+ // versions of std::unordered_map.
+ std::unordered_map<int, Handler> handlers_;
+
+ // List of fds to be removed from the map. The actual removal is performed
+ // by the event dispatch thread to avoid races.
+ std::vector<int> removed_handlers_;
+
+ // Epoll set descriptor and wake-up eventfd; -1 until successfully created.
+ int epoll_fd_;
+ int event_fd_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_EPOLL_EVENT_DISPATCHER_H_
diff --git a/libs/vr/libvrflinger/hardware_composer.cpp b/libs/vr/libvrflinger/hardware_composer.cpp
new file mode 100644
index 0000000..e0b592e
--- /dev/null
+++ b/libs/vr/libvrflinger/hardware_composer.cpp
@@ -0,0 +1,1572 @@
+#include "hardware_composer.h"
+
+#include <log/log.h>
+#include <cutils/properties.h>
+#include <cutils/sched_policy.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sync/sync.h>
+#include <sys/eventfd.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/system_properties.h>
+#include <sys/timerfd.h>
+#include <unistd.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+
+#include <dvr/performance_client_api.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/display_types.h>
+#include <private/dvr/pose_client_internal.h>
+#include <private/dvr/sync_util.h>
+
+#include "debug_hud_data.h"
+#include "screenshot_service.h"
+
+using android::pdx::LocalHandle;
+
+namespace android {
+namespace dvr {
+
+namespace {
+
+// If the number of pending fences goes over this count at the point when we
+// are about to submit a new frame to HWC, we will drop the frame. This should
+// be a signal that the display driver has begun queuing frames. Note that with
+// smart displays (with RAM), the fence is signaled earlier than the next vsync,
+// at the point when the DMA to the display completes. Currently we use a smart
+// display and the EDS timing coincides with zero pending fences, so this is 0.
+constexpr int kAllowedPendingFenceCount = 0;
+
+// If we think we're going to miss vsync by more than this amount, skip the
+// frame.
+constexpr int64_t kFrameSkipThresholdNs = 4000000; // 4ms
+
+// Counter PostLayers() deficiency by requiring apps to produce a frame at least
+// 2.5ms before vsync. See b/28881672.
+constexpr int64_t kFrameTimeEstimateMin = 2500000; // 2.5ms
+
+// NOTE(review): appears unused in this part of the file — confirm a caller
+// exists before removing.
+constexpr size_t kDefaultDisplayConfigCount = 32;
+
+// Conversion factor used to derive physical display metrics from DPI values.
+constexpr float kMetersPerInch = 0.0254f;
+
+// Sysfs node used to set the panel backlight brightness directly.
+const char kBacklightBrightnessSysFile[] =
+ "/sys/class/leds/lcd-backlight/brightness";
+
+// Sysfs nodes used to read vsync and pingpong state for the primary display.
+const char kPrimaryDisplayVSyncEventFile[] =
+ "/sys/class/graphics/fb0/vsync_event";
+
+const char kPrimaryDisplayWaitPPEventFile[] = "/sys/class/graphics/fb0/wait_pp";
+
+// System property toggled between "performance" and "idle" as the display
+// turns on and off (see UpdateDisplayState()).
+const char kDvrPerformanceProperty[] = "sys.dvr.performance";
+
+// Property holding a right-eye timing offset in ns — presumably read by the
+// post thread; confirm against the rest of this file.
+const char kRightEyeOffsetProperty[] = "dreamos.right_eye_offset_ns";
+
// Returns our best guess, in nanoseconds, for the time the compositor will
// spend rendering the next frame. The cost of asynchronous EDS and lens warp
// is currently measured at 2.5ms for one layer and 7ms for two layers, but we
// guess higher to account for CPU overhead. This estimate is only used before
// an actual frame time has been measured for the current configuration.
int64_t GuessFrameTime(int compositor_visible_layer_count) {
  constexpr int64_t kNoLayersEstimateNs = 500000;      // 0.5ms
  constexpr int64_t kSingleLayerEstimateNs = 5000000;  // 5ms
  constexpr int64_t kMultiLayerEstimateNs = 10500000;  // 10.5ms

  if (compositor_visible_layer_count == 0)
    return kNoLayersEstimateNs;
  if (compositor_visible_layer_count == 1)
    return kSingleLayerEstimateNs;
  return kMultiLayerEstimateNs;
}
+
// Get time offset from a vsync to when the pose for that vsync should be
// predicted out to. For example, if scanout gets halfway through the frame
// at the halfway point between vsyncs, then this could be half the period.
// With global shutter displays, this should be changed to the offset to when
// illumination begins. Low persistence adds a frame of latency, so we predict
// to the center of the next frame: 1.5 vsync periods ahead.
inline int64_t GetPosePredictionTimeOffset(int64_t vsync_period_ns) {
  // One full period plus half a period; identical to (period * 150) / 100
  // under C++ truncating integer division, without the intermediate multiply.
  return vsync_period_ns + vsync_period_ns / 2;
}
+
+} // anonymous namespace
+
+HardwareComposer::HardwareComposer()
+ : HardwareComposer(nullptr) {
+}
+
+// Constructs the composer in the suspended state (pause_post_thread_ starts
+// true); the heavy initialization happens in Resume().
+HardwareComposer::HardwareComposer(Hwc2::Composer* hwc2_hidl)
+ : hwc2_hidl_(hwc2_hidl),
+ display_transform_(HWC_TRANSFORM_NONE),
+ display_surfaces_updated_(false),
+ hardware_layers_need_update_(false),
+ display_on_(false),
+ active_layer_count_(0),
+ gpu_layer_(nullptr),
+ terminate_post_thread_event_fd_(-1),
+ pause_post_thread_(true),
+ backlight_brightness_fd_(-1),
+ primary_display_vsync_event_fd_(-1),
+ primary_display_wait_pp_fd_(-1),
+ vsync_sleep_timer_fd_(-1),
+ last_vsync_timestamp_(0),
+ vsync_count_(0),
+ frame_skip_count_(0),
+ pose_client_(nullptr) {
+ // Point each layers_[i] at the corresponding preallocated layer_storage_[i].
+ std::transform(layer_storage_.begin(), layer_storage_.end(), layers_.begin(),
+ [](auto& layer) { return &layer; });
+
+ // NOTE(review): raw new — presumably callbacks_ is a strong-pointer (sp<>)
+ // type that takes ownership; confirm it is released appropriately.
+ callbacks_ = new ComposerCallback;
+}
+
+HardwareComposer::~HardwareComposer(void) {
+ if (!IsSuspended()) {
+ Suspend();
+ }
+}
+
+bool HardwareComposer::Resume() {
+ std::lock_guard<std::mutex> autolock(layer_mutex_);
+
+ if (!IsSuspended()) {
+ ALOGE("HardwareComposer::Resume: HardwareComposer is already running.");
+ return false;
+ }
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ static const uint32_t attributes[] = {
+ HWC_DISPLAY_WIDTH, HWC_DISPLAY_HEIGHT, HWC_DISPLAY_VSYNC_PERIOD,
+ HWC_DISPLAY_DPI_X, HWC_DISPLAY_DPI_Y, HWC_DISPLAY_NO_ATTRIBUTE,
+ };
+
+ std::vector<Hwc2::Config> configs;
+ ret = (int32_t)hwc2_hidl_->getDisplayConfigs(HWC_DISPLAY_PRIMARY, &configs);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display configs");
+ return false;
+ }
+
+ uint32_t num_configs = configs.size();
+
+ for (size_t i = 0; i < num_configs; i++) {
+ ALOGI("HardwareComposer: cfg[%zd/%zd] = 0x%08x", i, num_configs,
+ configs[i]);
+
+ ret = GetDisplayMetrics(HWC_DISPLAY_PRIMARY, configs[i],
+ &native_display_metrics_);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display attributes %d", ret);
+ continue;
+ } else {
+ ret =
+ (int32_t)hwc2_hidl_->setActiveConfig(HWC_DISPLAY_PRIMARY, configs[i]);
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to set display configuration; ret=%d",
+ ret);
+ continue;
+ }
+
+ break;
+ }
+ }
+
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Could not set a valid display configuration.");
+ return false;
+ }
+
+ // Set the display metrics but never use rotation to avoid the long latency of
+ // rotation processing in hwc.
+ display_transform_ = HWC_TRANSFORM_NONE;
+ display_metrics_ = native_display_metrics_;
+
+ ALOGI(
+ "HardwareComposer: primary display attributes: width=%d height=%d "
+ "vsync_period_ns=%d DPI=%dx%d",
+ native_display_metrics_.width, native_display_metrics_.height,
+ native_display_metrics_.vsync_period_ns, native_display_metrics_.dpi.x,
+ native_display_metrics_.dpi.y);
+
+ // Always turn off vsync when we start.
+ EnableVsync(false);
+
+ constexpr int format = HAL_PIXEL_FORMAT_RGBA_8888;
+ constexpr int usage =
+ GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER;
+
+ framebuffer_target_ = std::make_shared<IonBuffer>(
+ native_display_metrics_.width, native_display_metrics_.height, format,
+ usage);
+
+ // Associate each Layer instance with a hardware composer layer.
+ for (auto layer : layers_) {
+ layer->Initialize(hwc2_hidl_.get(), &native_display_metrics_);
+ }
+
+ // Open the backlight brightness control sysfs node.
+ backlight_brightness_fd_ = LocalHandle(kBacklightBrightnessSysFile, O_RDWR);
+ ALOGW_IF(!backlight_brightness_fd_,
+ "HardwareComposer: Failed to open backlight brightness control: %s",
+ strerror(errno));
+
+ // Open the vsync event node for the primary display.
+ // TODO(eieio): Move this into a platform-specific class.
+ primary_display_vsync_event_fd_ =
+ LocalHandle(kPrimaryDisplayVSyncEventFile, O_RDONLY);
+ ALOGE_IF(!primary_display_vsync_event_fd_,
+ "HardwareComposer: Failed to open vsync event node for primary "
+ "display: %s",
+ strerror(errno));
+
+ // Open the wait pingpong status node for the primary display.
+ // TODO(eieio): Move this into a platform-specific class.
+ primary_display_wait_pp_fd_ =
+ LocalHandle(kPrimaryDisplayWaitPPEventFile, O_RDONLY);
+ ALOGE_IF(
+ !primary_display_wait_pp_fd_,
+ "HardwareComposer: Failed to open wait_pp node for primary display: %s",
+ strerror(errno));
+
+ // Create a timerfd based on CLOCK_MONOTINIC.
+ vsync_sleep_timer_fd_.Reset(timerfd_create(CLOCK_MONOTONIC, 0));
+ LOG_ALWAYS_FATAL_IF(
+ !vsync_sleep_timer_fd_,
+ "HardwareComposer: Failed to create vsync sleep timerfd: %s",
+ strerror(errno));
+
+ // Connect to pose service.
+ pose_client_ = dvrPoseCreate();
+ ALOGE_IF(!pose_client_, "HardwareComposer: Failed to create pose client");
+
+ // Variables used to control the post thread state
+ pause_post_thread_ = false;
+ terminate_post_thread_event_fd_.Reset(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
+
+ LOG_ALWAYS_FATAL_IF(
+ !terminate_post_thread_event_fd_,
+ "HardwareComposer: Failed to create terminate PostThread event fd : %s",
+ strerror(errno));
+
+ // If get_id() is the default thread::id object, it has not been created yet
+ if (post_thread_.get_id() == std::thread::id()) {
+ post_thread_ = std::thread(&HardwareComposer::PostThread, this);
+ } else {
+ UpdateDisplayState();
+ thread_pause_semaphore_.notify_one();
+ }
+
+ return true;
+}
+
+bool HardwareComposer::Suspend() {
+ // Wait for any pending layer operations to finish
+ std::unique_lock<std::mutex> layer_lock(layer_mutex_);
+
+ if (IsSuspended()) {
+ ALOGE("HardwareComposer::Suspend: HardwareComposer is already suspended.");
+ return false;
+ }
+
+ PausePostThread();
+
+ EnableVsync(false);
+ SetPowerMode(HWC_DISPLAY_PRIMARY, HWC2_POWER_MODE_OFF);
+
+ backlight_brightness_fd_.Close();
+ primary_display_vsync_event_fd_.Close();
+ primary_display_wait_pp_fd_.Close();
+ vsync_sleep_timer_fd_.Close();
+ retire_fence_fds_.clear();
+ gpu_layer_ = nullptr;
+
+ // We have to destroy the layers before we close the hwc device
+ for (size_t i = 0; i < kMaxHardwareLayers; ++i) {
+ layers_[i]->Reset();
+ }
+
+ active_layer_count_ = 0;
+
+ framebuffer_target_.reset();
+
+ //hwc2_hidl_.reset();
+
+ if (pose_client_)
+ dvrPoseDestroy(pose_client_);
+
+ return true;
+}
+
+// Signals the post thread to park itself and blocks until it has done so.
+void HardwareComposer::PausePostThread() {
+ // Observed by the post thread; the eventfd write below wakes it if blocked.
+ pause_post_thread_ = true;
+
+ int error = eventfd_write(terminate_post_thread_event_fd_.Get(), 1);
+ ALOGE_IF(error,
+ "HardwareComposer::PausePostThread: could not write post "
+ "thread termination event fd : %d",
+ error);
+
+ // NOTE(review): acquiring thread_pause_mutex_ presumably blocks until the
+ // post thread holds it while parked — confirm the post thread's locking
+ // protocol before relying on this as a synchronization barrier.
+ std::unique_lock<std::mutex> wait_for_thread(thread_pause_mutex_);
+ terminate_post_thread_event_fd_.Close();
+}
+
+DisplayMetrics HardwareComposer::GetHmdDisplayMetrics() const {
+ vec2i screen_size(display_metrics_.width, display_metrics_.height);
+ DisplayOrientation orientation =
+ (display_metrics_.width > display_metrics_.height
+ ? DisplayOrientation::kLandscape
+ : DisplayOrientation::kPortrait);
+ float dpi_x = static_cast<float>(display_metrics_.dpi.x) / 1000.0f;
+ float dpi_y = static_cast<float>(display_metrics_.dpi.y) / 1000.0f;
+ float meters_per_pixel_x = kMetersPerInch / dpi_x;
+ float meters_per_pixel_y = kMetersPerInch / dpi_y;
+ vec2 meters_per_pixel(meters_per_pixel_x, meters_per_pixel_y);
+ double frame_duration_s =
+ static_cast<double>(display_metrics_.vsync_period_ns) / 1000000000.0;
+ // TODO(hendrikw): Hard coding to 3mm. The Pixel is actually 4mm, but it
+ // seems that their tray to lens distance is wrong too, which
+ // offsets this, at least for the pixel.
+ float border_size = 0.003f;
+ return DisplayMetrics(screen_size, meters_per_pixel, border_size,
+ static_cast<float>(frame_duration_s), orientation);
+}
+
+int32_t HardwareComposer::Validate(hwc2_display_t display) {
+ uint32_t num_types;
+ uint32_t num_requests;
+ int32_t error =
+ (int32_t)hwc2_hidl_->validateDisplay(display, &num_types, &num_requests);
+
+ if (error == HWC2_ERROR_HAS_CHANGES) {
+ // TODO(skiazyk): We might need to inspect the requested changes first, but
+ // so far it seems like we shouldn't ever hit a bad state.
+ // error = hwc2_funcs_.accept_display_changes_fn_(hardware_composer_device_,
+ // display);
+ error = (int32_t)hwc2_hidl_->acceptDisplayChanges(display);
+ }
+
+ return error;
+}
+
+int32_t HardwareComposer::EnableVsync(bool enabled) {
+ return (int32_t)hwc2_hidl_->setVsyncEnabled(
+ HWC_DISPLAY_PRIMARY,
+ (Hwc2::IComposerClient::Vsync)(enabled ? HWC2_VSYNC_ENABLE
+ : HWC2_VSYNC_DISABLE));
+}
+
+int32_t HardwareComposer::Present(hwc2_display_t display) {
+ int32_t present_fence;
+ int32_t error = (int32_t)hwc2_hidl_->presentDisplay(display, &present_fence);
+
+ // According to the documentation, this fence is signaled at the time of
+ // vsync/DMA for physical displays.
+ if (error == HWC2_ERROR_NONE) {
+ ATRACE_INT("HardwareComposer: VsyncFence", present_fence);
+ retire_fence_fds_.emplace_back(present_fence);
+ } else {
+ ATRACE_INT("HardwareComposer: PresentResult", error);
+ }
+
+ return error;
+}
+
+int32_t HardwareComposer::SetPowerMode(hwc2_display_t display,
+ hwc2_power_mode_t mode) {
+ if (mode == HWC2_POWER_MODE_OFF) {
+ EnableVsync(false);
+ }
+
+ display_on_ = mode != HWC2_POWER_MODE_OFF;
+
+ return (int32_t)hwc2_hidl_->setPowerMode(
+ display, (Hwc2::IComposerClient::PowerMode)mode);
+}
+
+int32_t HardwareComposer::GetDisplayAttribute(hwc2_display_t display,
+ hwc2_config_t config,
+ hwc2_attribute_t attribute,
+ int32_t* out_value) const {
+ return (int32_t)hwc2_hidl_->getDisplayAttribute(
+ display, config, (Hwc2::IComposerClient::Attribute)attribute, out_value);
+}
+
+int32_t HardwareComposer::GetDisplayMetrics(
+ hwc2_display_t display, hwc2_config_t config,
+ HWCDisplayMetrics* out_metrics) const {
+ int32_t ret = HWC2_ERROR_NONE;
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_WIDTH,
+ &out_metrics->width);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display width");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_HEIGHT,
+ &out_metrics->height);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display height");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_VSYNC_PERIOD,
+ &out_metrics->vsync_period_ns);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display height");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_DPI_X,
+ &out_metrics->dpi.x);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display DPI X");
+ return ret;
+ }
+
+ ret = GetDisplayAttribute(display, config, HWC2_ATTRIBUTE_DPI_Y,
+ &out_metrics->dpi.y);
+ if (ret != HWC2_ERROR_NONE) {
+ ALOGE("HardwareComposer: Failed to get display DPI Y");
+ return ret;
+ }
+
+ return HWC2_ERROR_NONE;
+}
+
+void HardwareComposer::Dump(char* buffer, uint32_t* out_size) {
+ std::string debug_str = hwc2_hidl_->dumpDebugInfo();
+ ALOGI("%s", debug_str.c_str());
+
+ if (buffer == nullptr) {
+ *out_size = debug_str.size();
+ } else {
+ std::copy(debug_str.begin(), debug_str.begin() + *out_size, buffer);
+ }
+}
+
+// TODO(skiazyk): Figure out what to do with `is_geometry_changed`. There does
+// not seem to be any equivalent in the HWC2 API, but that doesn't mean its not
+// there.
+// Submits the currently active layers to HWC for one frame, dropping the
+// frame instead when the display driver appears backed up.
+void HardwareComposer::PostLayers(bool /*is_geometry_changed*/) {
+ ATRACE_NAME("HardwareComposer::PostLayers");
+
+ // Setup the hardware composer layers with current buffers.
+ for (size_t i = 0; i < active_layer_count_; i++) {
+ layers_[i]->Prepare();
+ }
+
+ // Now that we have taken in a frame from the application, we have a chance
+ // to drop the frame before passing the frame along to HWC.
+ // If the display driver has become backed up, we detect it here and then
+ // react by skipping this frame to catch up latency.
+ while (!retire_fence_fds_.empty() &&
+ (!retire_fence_fds_.front() ||
+ sync_wait(retire_fence_fds_.front().Get(), 0) == 0)) {
+ // There are only 2 fences in here, no performance problem to shift the
+ // array of ints.
+ retire_fence_fds_.erase(retire_fence_fds_.begin());
+ }
+
+ const bool is_frame_pending = IsFramePendingInDriver();
+ const bool is_fence_pending =
+ retire_fence_fds_.size() > kAllowedPendingFenceCount;
+
+ if (is_fence_pending || is_frame_pending) {
+ ATRACE_INT("frame_skip_count", ++frame_skip_count_);
+
+ ALOGW_IF(is_frame_pending, "Warning: frame already queued, dropping frame");
+ ALOGW_IF(is_fence_pending,
+ "Warning: dropping a frame to catch up with HWC (pending = %zd)",
+ retire_fence_fds_.size());
+
+ // Release the queued buffers without presenting them.
+ for (size_t i = 0; i < active_layer_count_; i++) {
+ layers_[i]->Drop();
+ }
+ return;
+ } else {
+ // Make the transition more obvious in systrace when the frame skip happens
+ // above.
+ ATRACE_INT("frame_skip_count", 0);
+ }
+
+#if TRACE
+ for (size_t i = 0; i < active_layer_count_; i++)
+ ALOGI("HardwareComposer::PostLayers: dl[%zu] ctype=0x%08x", i,
+ layers_[i]->GetCompositionType());
+#endif
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ std::vector<Hwc2::IComposerClient::Rect> full_region(1);
+ full_region[0].left = 0;
+ full_region[0].top = 0;
+ full_region[0].right = framebuffer_target_->width();
+ full_region[0].bottom = framebuffer_target_->height();
+
+ // NOTE(review): |ret| is still HWC2_ERROR_NONE here, so this never logs,
+ // and |full_region| is never passed to the composer — both look like
+ // leftovers from a removed (or missing) setClientTarget() call; confirm.
+ ALOGE_IF(ret, "Error setting client target : %d", ret);
+
+ ret = Validate(HWC_DISPLAY_PRIMARY);
+ if (ret) {
+ ALOGE("HardwareComposer::Validate failed; ret=%d", ret);
+ return;
+ }
+
+ ret = Present(HWC_DISPLAY_PRIMARY);
+ if (ret) {
+ ALOGE("HardwareComposer::Present failed; ret=%d", ret);
+ return;
+ }
+
+ std::vector<Hwc2::Layer> out_layers;
+ std::vector<int> out_fences;
+ ret = (int32_t)hwc2_hidl_->getReleaseFences(HWC_DISPLAY_PRIMARY, &out_layers,
+ &out_fences);
+ uint32_t num_elements = out_layers.size();
+
+ ALOGE_IF(ret, "HardwareComposer: GetReleaseFences failed; ret=%d", ret);
+
+ // Perform post-frame bookkeeping. Unused layers are a no-op.
+ for (size_t i = 0; i < num_elements; ++i) {
+ for (size_t j = 0; j < active_layer_count_; ++j) {
+ if (layers_[j]->GetLayerHandle() == out_layers[i]) {
+ layers_[j]->Finish(out_fences[i]);
+ }
+ }
+ }
+}
+
// TODO(skiazyk): This is a work-around for the fact that we currently do not
// handle the case when new surfaces are introduced when displayd is not
// in an active state. A proper solution will require re-structuring
// displayd a little, but hopefully this is sufficient for now.
// For example, could this be handled in |UpdateLayerSettings| instead?
//
// Requests display power-on and vsync whenever at least one display surface
// is attached, forces the backlight on, and publishes the performance-mode
// property. Reads display_on_ but does not write it; the caller is
// responsible for keeping that flag in sync with the actual power state.
void HardwareComposer::UpdateDisplayState() {
  const bool has_display_surfaces = display_surfaces_.size() > 0;

  if (has_display_surfaces) {
    // Power mode and vsync are requested unconditionally here, even if the
    // display is already on.
    int32_t ret = SetPowerMode(HWC_DISPLAY_PRIMARY, HWC2_POWER_MODE_ON);

    ALOGE_IF(ret, "HardwareComposer: Could not set power mode; ret=%d", ret);

    EnableVsync(true);
  }
  // TODO(skiazyk): We need to do something about accessing this directly,
  // supposedly there is a backlight service on the way.
  SetBacklightBrightness(255);

  // Re-read the vsync timestamp when surfaces appear while the display was
  // off — presumably to re-baseline WaitForVSync; confirm against callers.
  if (!display_on_ && has_display_surfaces) {
    const int error = ReadVSyncTimestamp(&last_vsync_timestamp_);
    ALOGE_IF(error < 0,
             "HardwareComposer::SetDisplaySurfaces: Failed to read vsync "
             "timestamp: %s",
             strerror(-error));
  }

  // Trigger target-specific performance mode change.
  property_set(kDvrPerformanceProperty, display_on_ ? "performance" : "idle");
}
+
+int HardwareComposer::SetDisplaySurfaces(
+ std::vector<std::shared_ptr<DisplaySurface>> surfaces) {
+ std::lock_guard<std::mutex> autolock(layer_mutex_);
+
+ ALOGI("HardwareComposer::SetDisplaySurfaces: surface count=%zd",
+ surfaces.size());
+
+ // Figure out whether we need to update hardware layers. If this surface
+ // change does not add or remove hardware layers we can avoid display hiccups
+ // by gracefully updating only the GPU compositor layers.
+ // hardware_layers_need_update_ is reset to false by the Post thread.
+ int old_gpu_layer_count = 0;
+ int new_gpu_layer_count = 0;
+ // Look for new hardware layers and count new GPU layers.
+ for (const auto& surface : surfaces) {
+ if (!(surface->flags() &
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION))
+ ++new_gpu_layer_count;
+ else if (std::find(display_surfaces_.begin(), display_surfaces_.end(),
+ surface) == display_surfaces_.end())
+ // This is a new hardware layer, we need to update.
+ hardware_layers_need_update_ = true;
+ }
+ // Look for deleted hardware layers or compositor layers.
+ for (const auto& surface : display_surfaces_) {
+ if (!(surface->flags() &
+ DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION))
+ ++old_gpu_layer_count;
+ else if (std::find(surfaces.begin(), surfaces.end(), surface) ==
+ surfaces.end())
+ // This is a deleted hardware layer, we need to update.
+ hardware_layers_need_update_ = true;
+ }
+ // Check for compositor hardware layer transition.
+ if ((!old_gpu_layer_count && new_gpu_layer_count) ||
+ (old_gpu_layer_count && !new_gpu_layer_count))
+ hardware_layers_need_update_ = true;
+
+ display_surfaces_ = std::move(surfaces);
+ display_surfaces_updated_ = true;
+
+ // Set the chosen layer order for all surfaces.
+ for (size_t i = 0; i < display_surfaces_.size(); ++i) {
+ display_surfaces_[i]->SetLayerOrder(static_cast<int>(i));
+ }
+
+ // TODO(skiazyk): fix this so that it is handled seamlessly with dormant/non-
+ // dormant state.
+ if (!IsSuspended()) {
+ UpdateDisplayState();
+ }
+
+ return 0;
+}
+
+// Reads the value of the display driver wait_pingpong state. Returns 0 or 1
+// (the value of the state) on success or a negative error otherwise.
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::ReadWaitPPState() {
+ // Gracefully handle when the kernel does not support this feature.
+ if (!primary_display_wait_pp_fd_)
+ return 0;
+
+ const int wait_pp_fd = primary_display_wait_pp_fd_.Get();
+ int ret, error;
+
+ ret = lseek(wait_pp_fd, 0, SEEK_SET);
+ if (ret < 0) {
+ error = errno;
+ ALOGE("HardwareComposer::ReadWaitPPState: Failed to seek wait_pp fd: %s",
+ strerror(error));
+ return -error;
+ }
+
+ char data = -1;
+ ret = read(wait_pp_fd, &data, sizeof(data));
+ if (ret < 0) {
+ error = errno;
+ ALOGE("HardwareComposer::ReadWaitPPState: Failed to read wait_pp state: %s",
+ strerror(error));
+ return -error;
+ }
+
+ switch (data) {
+ case '0':
+ return 0;
+ case '1':
+ return 1;
+ default:
+ ALOGE(
+ "HardwareComposer::ReadWaitPPState: Unexpected value for wait_pp: %d",
+ data);
+ return -EINVAL;
+ }
+}
+
+// Reads the timestamp of the last vsync from the display driver.
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::ReadVSyncTimestamp(int64_t* timestamp) {
+ const int event_fd = primary_display_vsync_event_fd_.Get();
+ int ret, error;
+
+ // The driver returns data in the form "VSYNC=<timestamp ns>".
+ std::array<char, 32> data;
+ data.fill('\0');
+
+ // Seek back to the beginning of the event file.
+ ret = lseek(event_fd, 0, SEEK_SET);
+ if (ret < 0) {
+ error = errno;
+ ALOGE(
+ "HardwareComposer::ReadVSyncTimestamp: Failed to seek vsync event fd: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ // Read the vsync event timestamp.
+ ret = read(event_fd, data.data(), data.size());
+ if (ret < 0) {
+ error = errno;
+ ALOGE_IF(
+ error != EAGAIN,
+ "HardwareComposer::ReadVSyncTimestamp: Error while reading timestamp: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ ret = sscanf(data.data(), "VSYNC=%" PRIu64,
+ reinterpret_cast<uint64_t*>(timestamp));
+ if (ret < 0) {
+ error = errno;
+ ALOGE(
+ "HardwareComposer::ReadVSyncTimestamp: Error while parsing timestamp: "
+ "%s",
+ strerror(error));
+ return -error;
+ }
+
+ return 0;
+}
+
+// Blocks until the next vsync event is signaled by the display driver.
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::BlockUntilVSync() {
+ const int event_fd = primary_display_vsync_event_fd_.Get();
+ pollfd pfd[2] = {
+ {
+ .fd = event_fd, .events = POLLPRI, .revents = 0,
+ },
+ // This extra event fd is to ensure that we can break out of this loop to
+ // pause the thread even when vsync is disabled, and thus no events on the
+ // vsync fd are being generated.
+ {
+ .fd = terminate_post_thread_event_fd_.Get(),
+ .events = POLLPRI | POLLIN,
+ .revents = 0,
+ },
+ };
+ int ret, error;
+ do {
+ ret = poll(pfd, 2, -1);
+ error = errno;
+ ALOGW_IF(ret < 0,
+ "HardwareComposer::BlockUntilVSync: Error while waiting for vsync "
+ "event: %s (%d)",
+ strerror(error), error);
+ } while (ret < 0 && error == EINTR);
+
+ return ret < 0 ? -error : 0;
+}
+
+// Waits for the next vsync and returns the timestamp of the vsync event. If
+// vsync already passed since the last call, returns the latest vsync timestamp
+// instead of blocking. This method updates the last_vsync_timeout_ in the
+// process.
+//
+// TODO(eieio): This is pretty driver specific, this should be moved to a
+// separate class eventually.
+int HardwareComposer::WaitForVSync(int64_t* timestamp) {
+ int error;
+
+ // Get the current timestamp and decide what to do.
+ while (true) {
+ int64_t current_vsync_timestamp;
+ error = ReadVSyncTimestamp(¤t_vsync_timestamp);
+ if (error < 0 && error != -EAGAIN)
+ return error;
+
+ if (error == -EAGAIN) {
+ // Vsync was turned off, wait for the next vsync event.
+ error = BlockUntilVSync();
+ if (error < 0)
+ return error;
+
+ // If a request to pause the post thread was given, exit immediately
+ if (IsSuspended()) {
+ return 0;
+ }
+
+ // Try again to get the timestamp for this new vsync interval.
+ continue;
+ }
+
+ // Check that we advanced to a later vsync interval.
+ if (TimestampGT(current_vsync_timestamp, last_vsync_timestamp_)) {
+ *timestamp = last_vsync_timestamp_ = current_vsync_timestamp;
+ return 0;
+ }
+
+ // See how close we are to the next expected vsync. If we're within 1ms,
+ // sleep for 1ms and try again.
+ const int64_t ns_per_frame = display_metrics_.vsync_period_ns;
+ const int64_t threshold_ns = 1000000;
+
+ const int64_t next_vsync_est = last_vsync_timestamp_ + ns_per_frame;
+ const int64_t distance_to_vsync_est = next_vsync_est - GetSystemClockNs();
+
+ if (distance_to_vsync_est > threshold_ns) {
+ // Wait for vsync event notification.
+ error = BlockUntilVSync();
+ if (error < 0)
+ return error;
+
+ // Again, exit immediately if the thread was requested to pause
+ if (IsSuspended()) {
+ return 0;
+ }
+ } else {
+ // Sleep for a short time before retrying.
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+ }
+}
+
+int HardwareComposer::SleepUntil(int64_t wakeup_timestamp) {
+ const int timer_fd = vsync_sleep_timer_fd_.Get();
+ const itimerspec wakeup_itimerspec = {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = NsToTimespec(wakeup_timestamp),
+ };
+ int ret =
+ timerfd_settime(timer_fd, TFD_TIMER_ABSTIME, &wakeup_itimerspec, nullptr);
+ int error = errno;
+ if (ret < 0) {
+ ALOGE("HardwareComposer::SleepUntil: Failed to set timerfd: %s",
+ strerror(error));
+ return -error;
+ }
+
+ // Wait for the timer by reading the expiration count.
+ uint64_t expiration_count;
+ ret = read(timer_fd, &expiration_count, sizeof(expiration_count));
+ if (ret < 0) {
+ ALOGE("HardwareComposer::SleepUntil: Failed to wait for timerfd: %s",
+ strerror(error));
+ return -error;
+ }
+
+ return 0;
+}
+
// Body of the Post thread: owns the per-frame loop end-to-end. Each iteration
// waits for vsync, updates the layer configuration, estimates the next frame
// duration from history, sleeps until the EDS wakeup point, posts the
// hardware layers, and then drives the GPU compositor. While IsSuspended()
// the thread parks on thread_pause_semaphore_.
void HardwareComposer::PostThread() {
  // NOLINTNEXTLINE(runtime/int)
  prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("PostThread"), 0, 0, 0);

  // Held for the lifetime of the thread; released only while waiting on the
  // pause semaphore below.
  std::unique_lock<std::mutex> thread_lock(thread_pause_mutex_);

  // Set the scheduler to SCHED_FIFO with high priority.
  int error = dvrSetSchedulerClass(0, "graphics:high");
  LOG_ALWAYS_FATAL_IF(
      error < 0,
      "HardwareComposer::PostThread: Failed to set scheduler class: %s",
      strerror(-error));
  error = dvrSetCpuPartition(0, "/system/performance");
  LOG_ALWAYS_FATAL_IF(
      error < 0,
      "HardwareComposer::PostThread: Failed to set cpu partition: %s",
      strerror(-error));

  // Force the layers to be setup at least once.
  display_surfaces_updated_ = true;

  // Initialize the GPU compositor.
  LOG_ALWAYS_FATAL_IF(!compositor_.Initialize(GetHmdDisplayMetrics()),
                      "Failed to initialize the compositor");

  const int64_t ns_per_frame = display_metrics_.vsync_period_ns;
  const int64_t photon_offset_ns = GetPosePredictionTimeOffset(ns_per_frame);

  // TODO(jbates) Query vblank time from device, when such an API is available.
  // This value (6.3%) was measured on A00 in low persistence mode.
  int64_t vblank_ns = ns_per_frame * 63 / 1000;
  int64_t right_eye_photon_offset_ns = (ns_per_frame - vblank_ns) / 2;

  // Check property for overriding right eye offset value.
  right_eye_photon_offset_ns =
      property_get_int64(kRightEyeOffsetProperty, right_eye_photon_offset_ns);

  // The list of surfaces the compositor should attempt to render. This is set
  // at the start of each frame.
  std::vector<std::shared_ptr<DisplaySurface>> compositor_surfaces;
  compositor_surfaces.reserve(2);

  // Our history of frame times. This is used to get a better estimate of how
  // long the next frame will take, to set a schedule for EDS.
  FrameTimeHistory frame_time_history;

  // The backlog is used to allow us to start rendering the next frame before
  // the previous frame has finished, and still get an accurate measurement of
  // frame duration.
  std::vector<FrameTimeMeasurementRecord> frame_time_backlog;
  constexpr int kFrameTimeBacklogMax = 2;
  frame_time_backlog.reserve(kFrameTimeBacklogMax);

  // Storage for retrieving fence info.
  FenceInfoBuffer fence_info_buffer;

  while (1) {
    ATRACE_NAME("HardwareComposer::PostThread");

    // Park here while suspended; woken via thread_pause_semaphore_.
    while (IsSuspended()) {
      ALOGI("HardwareComposer::PostThread: Post thread pause requested.");
      thread_pause_semaphore_.wait(thread_lock);
      // The layers will need to be updated since they were deleted previously
      display_surfaces_updated_ = true;
      hardware_layers_need_update_ = true;
    }

    int64_t vsync_timestamp = 0;
    {
      // Label the systrace slice with the vsync number we are waiting for.
      std::array<char, 128> buf;
      snprintf(buf.data(), buf.size(), "wait_vsync|vsync=%d|",
               vsync_count_ + 1);
      ATRACE_NAME(buf.data());

      error = WaitForVSync(&vsync_timestamp);
      ALOGE_IF(
          error < 0,
          "HardwareComposer::PostThread: Failed to wait for vsync event: %s",
          strerror(-error));

      // Don't bother processing this frame if a pause was requested
      if (IsSuspended()) {
        continue;
      }
    }

    ++vsync_count_;

    // NOTE(review): this 3-second throttle only updates last_print_time and no
    // longer prints anything — looks like leftover debug code; confirm before
    // removing.
    static double last_print_time = -1;
    double current_time = GetSystemClockSec();
    if (last_print_time < 0 || current_time - last_print_time > 3) {
      last_print_time = current_time;
    }

    if (pose_client_) {
      // Signal the pose service with vsync info.
      // Display timestamp is in the middle of scanout.
      privateDvrPoseNotifyVsync(pose_client_, vsync_count_,
                                vsync_timestamp + photon_offset_ns,
                                ns_per_frame, right_eye_photon_offset_ns);
    }

    bool layer_config_changed = UpdateLayerConfig(&compositor_surfaces);

    if (layer_config_changed) {
      // The layer set changed; previous frame-time samples no longer apply.
      frame_time_history.ResetWithSeed(
          GuessFrameTime(compositor_surfaces.size()));
      frame_time_backlog.clear();
    } else {
      UpdateFrameTimeHistory(&frame_time_backlog, kFrameTimeBacklogMax,
                             &fence_info_buffer, &frame_time_history);
    }

    // Get our current best estimate at how long the next frame will take to
    // render, based on how long previous frames took to render. Use this
    // estimate to decide when to wake up for EDS.
    int64_t frame_time_estimate =
        frame_time_history.GetSampleCount() == 0
            ? GuessFrameTime(compositor_surfaces.size())
            : frame_time_history.GetAverage();
    frame_time_estimate = std::max(frame_time_estimate, kFrameTimeEstimateMin);
    DebugHudData::data.hwc_latency = frame_time_estimate;

    // Signal all of the vsync clients. Because absolute time is used for the
    // wakeup time below, this can take a little time if necessary.
    if (vsync_callback_)
      vsync_callback_(HWC_DISPLAY_PRIMARY, vsync_timestamp, frame_time_estimate,
                      vsync_count_);

    {
      // Sleep until async EDS wakeup time.
      ATRACE_NAME("sleep");

      int64_t display_time_est = vsync_timestamp + ns_per_frame;
      int64_t now = GetSystemClockNs();
      int64_t frame_finish_time_est = now + frame_time_estimate;
      int64_t sleep_time_ns = display_time_est - now - frame_time_estimate;

      ATRACE_INT64("sleep_time_ns", sleep_time_ns);
      // If the estimated finish time overshoots the display time by more than
      // the threshold, drop the frame instead of presenting late.
      if (frame_finish_time_est - display_time_est >= kFrameSkipThresholdNs) {
        ATRACE_INT("frame_skip_count", ++frame_skip_count_);
        ALOGE(
            "HardwareComposer::PostThread: Missed frame schedule, drop "
            "frame. Expected frame miss: %.1fms",
            static_cast<double>(frame_finish_time_est - display_time_est) /
                1000000);

        // There are several reasons we might skip a frame, but one possibility
        // is we mispredicted the frame time. Clear out the frame time history.
        frame_time_history.ResetWithSeed(
            GuessFrameTime(compositor_surfaces.size()));
        frame_time_backlog.clear();
        DebugHudData::data.hwc_frame_stats.SkipFrame();

        continue;
      } else {
        // Make the transition more obvious in systrace when the frame skip
        // happens above.
        ATRACE_INT("frame_skip_count", 0);
      }

      if (sleep_time_ns > 0) {
        error = SleepUntil(display_time_est - frame_time_estimate);
        ALOGE_IF(error < 0, "HardwareComposer::PostThread: Failed to sleep: %s",
                 strerror(-error));
      }
    }

    DebugHudData::data.hwc_frame_stats.AddFrame();

    int64_t frame_start_time = GetSystemClockNs();

    // Setup the output buffer for the compositor. This needs to happen before
    // you draw with the compositor.
    if (gpu_layer_ != nullptr) {
      gpu_layer_->UpdateDirectBuffer(compositor_.GetBuffer());
    }

    // Call PostLayers now before performing the GL code for the compositor to
    // avoid missing the deadline that can cause the lower-level hwc to get
    // permanently backed up.
    PostLayers(layer_config_changed);

    PostCompositorBuffers(compositor_surfaces);

    if (gpu_layer_ != nullptr) {
      // Note, with scanline racing, this draw is timed along with the post
      // layers to finish just in time.
      LocalHandle frame_fence_fd;
      compositor_.DrawFrame(vsync_count_ + 1, &frame_fence_fd);
      if (frame_fence_fd) {
        LOG_ALWAYS_FATAL_IF(frame_time_backlog.size() >= kFrameTimeBacklogMax,
                            "Frame time backlog exceeds capacity");
        frame_time_backlog.push_back(
            {frame_start_time, std::move(frame_fence_fd)});
      }
    } else if (!layer_config_changed) {
      frame_time_history.AddSample(GetSystemClockNs() - frame_start_time);
    }

    HandlePendingScreenshots();
  }

  // NOTE(review): this point is unreachable — the while (1) loop above has no
  // break — which matches the TODO below about the thread not exiting cleanly.
  // TODO(skiazyk): Currently the compositor is not fully releasing its EGL
  // context, which seems to prevent the thread from exiting properly.
  // This shouldn't be too hard to address, I just don't have time right now.
  compositor_.Shutdown();
}
+
// Refreshes the compositor surface list and, when flagged, rebuilds the set of
// hardware composer layers to match display_surfaces_. Returns true when the
// surface set changed since the previous call (display_surfaces_updated_ was
// set), false otherwise. |compositor_surfaces| receives the surfaces the GPU
// compositor should render this frame. Holds layer_mutex_ throughout.
bool HardwareComposer::UpdateLayerConfig(
    std::vector<std::shared_ptr<DisplaySurface>>* compositor_surfaces) {
  std::lock_guard<std::mutex> autolock(layer_mutex_);

  if (!display_surfaces_updated_)
    return false;

  display_surfaces_updated_ = false;
  DebugHudData::data.ResetLayers();

  // Update compositor layers.
  {
    ATRACE_NAME("UpdateLayerConfig_GpuLayers");
    compositor_.UpdateSurfaces(display_surfaces_);
    compositor_surfaces->clear();
    for (size_t i = 0; i < display_surfaces_.size(); ++i) {
      const auto& surface = display_surfaces_[i];
      // Surfaces without DISABLE_SYSTEM_DISTORTION go through the GPU
      // compositor rather than a dedicated hardware layer.
      if (!(surface->flags() &
            DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION)) {
        compositor_surfaces->push_back(surface);
      }
    }
  }

  if (!hardware_layers_need_update_)
    return true;

  // Update hardware layers.

  ATRACE_NAME("UpdateLayerConfig_HwLayers");
  hardware_layers_need_update_ = false;

  // Update the display layers in a non-destructive fashion.

  // Create a map from surface id to hardware layer
  std::map<int, Layer*> display_surface_layers;

  for (size_t i = 0; i < active_layer_count_; ++i) {
    auto layer = layers_[i];
    int surface_id = layer->GetSurfaceId();

    auto found =
        std::find_if(display_surfaces_.begin(), display_surfaces_.end(),
                     [surface_id](const auto& surface) {
                       return surface->surface_id() == surface_id;
                     });

    if (found != display_surfaces_.end()) {
      display_surface_layers[surface_id] = layer;
    }
  }

  // The GPU compositor output occupies a single hardware layer; per
  // is_layer_active below, a layer with a negative surface id is treated as
  // that compositor layer. It is kept only while some surface still needs
  // system distortion.
  bool has_gpu_layer = std::any_of(
      display_surfaces_.begin(), display_surfaces_.end(),
      [](const auto& surface) {
        return !(surface->flags() &
                 DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION);
      });

  if (!has_gpu_layer) {
    gpu_layer_ = nullptr;
  }

  // A layer stays active if its surface is still attached (non-negative ids)
  // or, for the compositor layer (negative id), if GPU composition is needed.
  auto is_layer_active = [&display_surface_layers, has_gpu_layer](auto layer) {
    int surface_id = layer->GetSurfaceId();
    if (surface_id >= 0) {
      return display_surface_layers.count(surface_id) > 0;
    } else {
      return has_gpu_layer;
    }
  };

  // Compress the in-use layers to the top of the list
  auto part = std::partition(
      layers_.begin(), layers_.begin() + active_layer_count_, is_layer_active);

  size_t new_active_layer_count = part - layers_.begin();

  // Clear any unused layers
  for (size_t i = new_active_layer_count; i < active_layer_count_; ++i) {
    layers_[i]->Reset();
  }

  active_layer_count_ = new_active_layer_count;

  bool gpu_layer_applied = false;

  // Create/update all of the hardware layers
  for (size_t i = 0; i < display_surfaces_.size(); ++i) {
    const auto& surface = display_surfaces_[i];
    bool is_hw_surface =
        surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_DISABLE_SYSTEM_DISTORTION;
    // The bottom-most layer is opaque; everything above blends over it.
    hwc2_blend_mode_t blending =
        i == 0 ? HWC2_BLEND_MODE_NONE : HWC2_BLEND_MODE_COVERAGE;

    DebugHudData::data.SetLayerInfo(
        i, surface->width(), surface->height(),
        !!(surface->flags() & DVR_DISPLAY_SURFACE_FLAGS_GEOMETRY_SEPARATE_2));

    // All GPU-composited surfaces share one hardware layer; only set it up
    // for the first such surface encountered.
    if (!is_hw_surface && gpu_layer_applied) {
      continue;
    }

    Layer* target_layer;
    bool existing_layer = false;

    if (is_hw_surface) {
      auto it = display_surface_layers.find(surface->surface_id());

      if (it != display_surface_layers.end()) {
        target_layer = it->second;
        existing_layer = true;
      }
    } else if (gpu_layer_ != nullptr) {
      target_layer = gpu_layer_;
      existing_layer = true;
    }

    if (!existing_layer) {
      if (active_layer_count_ >= kMaxHardwareLayers) {
        ALOGI("HardwareComposer: More than %d hardware layers requested.",
              kMaxHardwareLayers);
        break;
      } else {
        target_layer = layers_[active_layer_count_];
        ++active_layer_count_;
      }

      ALOGD_IF(TRACE,
               "HardwareComposer::UpdateLayerConfig: (new) surface_id=%d -> "
               "layer=%zd",
               surface->surface_id(), i);

      if (is_hw_surface) {
        target_layer->Setup(surface, blending, display_transform_,
                            HWC2_COMPOSITION_DEVICE, i);
      } else {
        gpu_layer_ = target_layer;
        target_layer->Setup(compositor_.GetBuffer(), blending,
                            display_transform_, HWC2_COMPOSITION_DEVICE, i);
      }
    } else {
      ALOGD_IF(TRACE,
               "HardwareComposer::UpdateLayerConfig: (retained) surface_id=%d "
               "-> layer=%zd",
               surface->surface_id(), i);

      // Retained layer: refresh only the properties that depend on list
      // position.
      target_layer->SetBlending(blending);
      target_layer->SetZOrderIndex(i);
      target_layer->UpdateLayerSettings();
    }

    gpu_layer_applied = !is_hw_surface;
  }

  ALOGD_IF(TRACE, "HardwareComposer::UpdateLayerConfig: %zd active layers",
           active_layer_count_);

  return true;
}
+
+void HardwareComposer::PostCompositorBuffers(
+ const std::vector<std::shared_ptr<DisplaySurface>>& compositor_surfaces) {
+ ATRACE_NAME("PostCompositorBuffers");
+ for (const auto& surface : compositor_surfaces) {
+ compositor_.PostBuffer(surface);
+ }
+}
+
+void HardwareComposer::UpdateFrameTimeHistory(
+ std::vector<FrameTimeMeasurementRecord>* backlog, int backlog_max,
+ FenceInfoBuffer* fence_info_buffer, FrameTimeHistory* history) {
+ while (!backlog->empty()) {
+ const auto& frame_time_record = backlog->front();
+ int64_t end_time = 0;
+ bool frame_finished = CheckFrameFinished(frame_time_record.fence.Get(),
+ fence_info_buffer, &end_time);
+ if (frame_finished) {
+ int64_t frame_duration = end_time - frame_time_record.start_time;
+ history->AddSample(frame_duration);
+ // Our backlog is tiny (2 elements), so erasing from the front is ok
+ backlog->erase(backlog->begin());
+ } else {
+ break;
+ }
+ }
+
+ if (backlog->size() == static_cast<size_t>(backlog_max)) {
+ // Yikes, something must've gone wrong if our oldest frame hasn't finished
+ // yet. Give up on waiting for it.
+ const auto& stale_frame_time_record = backlog->front();
+ int64_t frame_duration =
+ GetSystemClockNs() - stale_frame_time_record.start_time;
+ backlog->erase(backlog->begin());
+ history->AddSample(frame_duration);
+ ALOGW("Frame didn't finish after %.1fms",
+ static_cast<double>(frame_duration) / 1000000);
+ }
+}
+
+bool HardwareComposer::CheckFrameFinished(int frame_fence_fd,
+ FenceInfoBuffer* fence_info_buffer,
+ int64_t* timestamp) {
+ int result = -1;
+ int sync_result = sync_wait(frame_fence_fd, 0);
+ if (sync_result == 0) {
+ result =
+ GetFenceSignaledTimestamp(frame_fence_fd, fence_info_buffer, timestamp);
+ if (result < 0) {
+ ALOGE("Failed getting signaled timestamp from fence");
+ }
+ } else if (errno != ETIME) {
+ ALOGE("sync_wait on frame fence failed");
+ }
+ return result >= 0;
+}
+
+void HardwareComposer::HandlePendingScreenshots() {
+ // Take a screenshot of the requested layer, if available.
+ // TODO(eieio): Look into using virtual displays to composite the layer stack
+ // into a single output buffer that can be returned to the screenshot clients.
+ if (active_layer_count_ > 0) {
+ if (auto screenshot_service = ScreenshotService::GetInstance()) {
+ if (screenshot_service->IsScreenshotRequestPending()) {
+ ATRACE_NAME("screenshot");
+ screenshot_service->TakeIfNeeded(layers_, compositor_);
+ }
+ } else {
+ ALOGW(
+ "HardwareComposer::HandlePendingScreenshots: Failed to get "
+ "screenshot service!");
+ }
+ }
+}
+
+void HardwareComposer::SetVSyncCallback(VSyncCallback callback) {
+ vsync_callback_ = callback;
+}
+
// HWC2 refresh (invalidate) callback. Currently a no-op.
void HardwareComposer::HwcRefresh(hwc2_callback_data_t /*data*/,
                                  hwc2_display_t /*display*/) {
  // TODO(eieio): implement invalidate callbacks.
}
+
// HWC2 vsync callback. Registered only so the HWC enables vsync signals; the
// timestamp argument is deliberately ignored.
void HardwareComposer::HwcVSync(hwc2_callback_data_t /*data*/,
                                hwc2_display_t /*display*/,
                                int64_t /*timestamp*/) {
  ATRACE_NAME(__PRETTY_FUNCTION__);
  // Intentionally empty. HWC may require a callback to be set to enable vsync
  // signals. We bypass this callback thread by monitoring the vsync event
  // directly, but signals still need to be enabled.
}
+
// HWC2 hotplug callback. Currently a no-op.
void HardwareComposer::HwcHotplug(hwc2_callback_data_t /*callbackData*/,
                                  hwc2_display_t /*display*/,
                                  hwc2_connection_t /*connected*/) {
  // TODO(eieio): implement display hotplug callbacks.
}
+
+void HardwareComposer::SetBacklightBrightness(int brightness) {
+ if (backlight_brightness_fd_) {
+ std::array<char, 32> text;
+ const int length = snprintf(text.data(), text.size(), "%d", brightness);
+ write(backlight_brightness_fd_.Get(), text.data(), length);
+ }
+}
+
// Constructs an unattached layer with default settings. Initialize() must be
// called before the layer is used (see the class comment in the header).
Layer::Layer()
    : hwc2_hidl_(nullptr),
      surface_index_(-1),
      hardware_composer_layer_(0),
      display_metrics_(nullptr),
      blending_(HWC2_BLEND_MODE_NONE),
      transform_(HWC_TRANSFORM_NONE),
      composition_type_(HWC2_COMPOSITION_DEVICE),
      surface_rect_functions_applied_(false) {}
+
// Stores the composer HAL proxy and display metrics used by this layer. This
// class does not own either pointer; both must remain valid for the Layer's
// lifetime, and this must be called before any other method.
void Layer::Initialize(Hwc2::Composer* hwc2_hidl, HWCDisplayMetrics* metrics) {
  hwc2_hidl_ = hwc2_hidl;
  display_metrics_ = metrics;
}
+
// Returns the layer to its default, unused state: releases the acquired
// buffer (handing over any pending release fence), destroys the backing HWC
// layer, and clears all per-layer settings and references.
void Layer::Reset() {
  const int ret = acquired_buffer_.Release(std::move(release_fence_));
  ALOGE_IF(ret < 0, "Layer::Reset: failed to release buffer: %s",
           strerror(-ret));

  // Destroy the HWC-side layer only if one was actually created.
  if (hwc2_hidl_ != nullptr && hardware_composer_layer_) {
    hwc2_hidl_->destroyLayer(HWC_DISPLAY_PRIMARY, hardware_composer_layer_);
    hardware_composer_layer_ = 0;
  }

  // Restore constructor defaults and drop buffer/surface references.
  surface_index_ = static_cast<size_t>(-1);
  blending_ = HWC2_BLEND_MODE_NONE;
  transform_ = HWC_TRANSFORM_NONE;
  composition_type_ = HWC2_COMPOSITION_DEVICE;
  direct_buffer_ = nullptr;
  surface_ = nullptr;
  acquire_fence_fd_.Close();
  surface_rect_functions_applied_ = false;
}
+
+void Layer::Setup(const std::shared_ptr<DisplaySurface>& surface,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t index) {
+ Reset();
+ surface_index_ = index;
+ surface_ = surface;
+ blending_ = blending;
+ transform_ = transform;
+ composition_type_ = composition_type;
+ CommonLayerSetup();
+}
+
+void Layer::Setup(const std::shared_ptr<IonBuffer>& buffer,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t z_order) {
+ Reset();
+ surface_index_ = z_order;
+ direct_buffer_ = buffer;
+ blending_ = blending;
+ transform_ = transform;
+ composition_type_ = composition_type;
+ CommonLayerSetup();
+}
+
// Swaps in a new direct (non-surface) content buffer for this layer.
void Layer::UpdateDirectBuffer(const std::shared_ptr<IonBuffer>& buffer) {
  direct_buffer_ = buffer;
}
+
// Sets the blend mode applied on the next UpdateLayerSettings()/Prepare().
void Layer::SetBlending(hwc2_blend_mode_t blending) { blending_ = blending; }
+
// Sets the z-order index applied on the next UpdateLayerSettings().
void Layer::SetZOrderIndex(int z_index) { surface_index_ = z_index; }
+
+IonBuffer* Layer::GetBuffer() {
+ if (direct_buffer_)
+ return direct_buffer_.get();
+ else if (acquired_buffer_.IsAvailable())
+ return acquired_buffer_.buffer()->buffer();
+ else
+ return nullptr;
+}
+
// Pushes the full set of static layer properties (composition type, blend
// mode, display frame, visible region, plane alpha, z-order) to the HWC for
// the existing hardware_composer_layer_. Each call's error is logged
// independently; failures do not abort the sequence.
void Layer::UpdateLayerSettings() {
  if (!IsLayerSetup()) {
    ALOGE("HardwareComposer: Trying to update layers data on an unused layer.");
    return;
  }

  int32_t ret = HWC2_ERROR_NONE;

  hwc2_display_t display = HWC_DISPLAY_PRIMARY;

  ret = (int32_t)hwc2_hidl_->setLayerCompositionType(
      display, hardware_composer_layer_,
      (Hwc2::IComposerClient::Composition)composition_type_);
  ALOGE_IF(ret, "HardwareComposer: Error setting layer composition type : %d",
           ret);
  // ret = (int32_t) hwc2_hidl_->setLayerTransform(display,
  // hardware_composer_layer_,
  // (Hwc2::IComposerClient::Transform)
  // transform_);
  // ALOGE_IF(ret, "HardwareComposer: Error setting layer transform : %d", ret);

  // ret = hwc2_funcs_->set_layer_blend_mode_fn_(
  //     hardware_composer_device_, display, hardware_composer_layer_,
  //     blending_);
  ret = (int32_t)hwc2_hidl_->setLayerBlendMode(
      display, hardware_composer_layer_,
      (Hwc2::IComposerClient::BlendMode)blending_);
  ALOGE_IF(ret, "HardwareComposer: Error setting layer blend mode : %d", ret);

  // The layer always covers the entire display.
  Hwc2::IComposerClient::Rect display_frame;
  display_frame.left = 0;
  display_frame.top = 0;
  display_frame.right = display_metrics_->width;
  display_frame.bottom = display_metrics_->height;
  ret = (int32_t)hwc2_hidl_->setLayerDisplayFrame(
      display, hardware_composer_layer_, display_frame);
  ALOGE_IF(ret, "HardwareComposer: Error setting layer display frame : %d",
           ret);

  // The visible region mirrors the full display frame.
  std::vector<Hwc2::IComposerClient::Rect> visible_region(1);
  visible_region[0] = display_frame;
  ret = (int32_t)hwc2_hidl_->setLayerVisibleRegion(
      display, hardware_composer_layer_, visible_region);
  ALOGE_IF(ret, "HardwareComposer: Error setting layer visible region : %d",
           ret);

  ret = (int32_t)hwc2_hidl_->setLayerPlaneAlpha(display,
                                                hardware_composer_layer_, 1.0f);
  ALOGE_IF(ret, "HardwareComposer: Error setting layer plane alpha : %d", ret);

  // surface_index_ doubles as the z-order (see SetZOrderIndex).
  ret = (int32_t)hwc2_hidl_->setLayerZOrder(display, hardware_composer_layer_,
                                            surface_index_);
  ALOGE_IF(ret, "HardwareComposer: Error, setting z order index : %d", ret);
}
+
+void Layer::CommonLayerSetup() {
+ int32_t ret = (int32_t)hwc2_hidl_->createLayer(HWC_DISPLAY_PRIMARY,
+ &hardware_composer_layer_);
+
+ ALOGE_IF(ret,
+ "HardwareComposer: Failed to create layer on primary display : %d",
+ ret);
+
+ UpdateLayerSettings();
+}
+
+void Layer::Prepare() {
+ int right, bottom;
+ buffer_handle_t handle;
+
+ if (surface_) {
+ // Only update the acquired buffer when one is either available or this is
+ // the first time through.
+ if (surface_->IsBufferAvailable()) {
+ // If we previously set this to a solid color layer to stall for time,
+ // revert it to a device layer.
+ if (acquired_buffer_.IsEmpty() &&
+ composition_type_ != HWC2_COMPOSITION_DEVICE) {
+ composition_type_ = HWC2_COMPOSITION_DEVICE;
+ hwc2_hidl_->setLayerCompositionType(
+ HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ (Hwc2::IComposerClient::Composition)HWC2_COMPOSITION_DEVICE);
+ }
+
+ DebugHudData::data.AddLayerFrame(surface_index_);
+ acquired_buffer_.Release(std::move(release_fence_));
+ acquired_buffer_ = surface_->AcquireCurrentBuffer();
+
+ // Basic latency stopgap for when the application misses a frame:
+ // If the application recovers on the 2nd or 3rd (etc) frame after
+ // missing, this code will skip a frame to catch up by checking if
+ // the next frame is also available.
+ if (surface_->IsBufferAvailable()) {
+ DebugHudData::data.SkipLayerFrame(surface_index_);
+ ATRACE_NAME("DropToCatchUp");
+ ATRACE_ASYNC_END("BufferPost", acquired_buffer_.buffer()->id());
+ acquired_buffer_ = surface_->AcquireCurrentBuffer();
+ }
+ ATRACE_ASYNC_END("BufferPost", acquired_buffer_.buffer()->id());
+ } else if (acquired_buffer_.IsEmpty()) {
+ // While we are waiting for a buffer, set this to be an empty layer
+ if (composition_type_ != HWC2_COMPOSITION_SOLID_COLOR) {
+ composition_type_ = HWC2_COMPOSITION_SOLID_COLOR;
+ hwc2_hidl_->setLayerCompositionType(
+ HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ (Hwc2::IComposerClient::Composition)HWC2_COMPOSITION_SOLID_COLOR);
+
+ Hwc2::IComposerClient::Color layer_color = {
+ 0, 0, 0, 0,
+ };
+ hwc2_hidl_->setLayerColor(HWC_DISPLAY_PRIMARY, hardware_composer_layer_,
+ layer_color);
+ }
+ return;
+ }
+ right = acquired_buffer_.buffer()->width();
+ bottom = acquired_buffer_.buffer()->height();
+ handle = acquired_buffer_.buffer()->native_handle();
+ acquire_fence_fd_.Reset(acquired_buffer_.ClaimAcquireFence().Release());
+ } else {
+ right = direct_buffer_->width();
+ bottom = direct_buffer_->height();
+ handle = direct_buffer_->handle();
+ acquire_fence_fd_.Close();
+ }
+
+ int32_t ret = HWC2_ERROR_NONE;
+
+ if (composition_type_ == HWC2_COMPOSITION_DEVICE) {
+ ret = (int32_t)hwc2_hidl_->setLayerBuffer(HWC_DISPLAY_PRIMARY,
+ hardware_composer_layer_, handle,
+ acquire_fence_fd_.Get());
+
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer buffer : %d", ret);
+ }
+
+ if (!surface_rect_functions_applied_) {
+ Hwc2::IComposerClient::FRect crop_rect = {
+ 0, 0, static_cast<float>(right), static_cast<float>(bottom),
+ };
+ hwc2_hidl_->setLayerSourceCrop(HWC_DISPLAY_PRIMARY,
+ hardware_composer_layer_, crop_rect);
+
+ ALOGE_IF(ret, "HardwareComposer: Error setting layer source crop : %d",
+ ret);
+
+// TODO(skiazyk): why is this ifdef'd out. Is if a driver-specific issue where
+// it must/cannot be called?
+#ifdef QCOM_BSP
+ hwc_rect_t damage_rect = {
+ 0, 0, right, bottom,
+ };
+ hwc_region_t damage = {
+ 1, &damage_rect,
+ };
+ // ret = hwc2_funcs_->set_layer_surface_damage(
+ // hardware_composer_device_, HWC_DISPLAY_PRIMARY,
+ // hardware_composer_layer_, damage);
+ // uses a std::vector as the listing
+ // hwc2_hidl_->setLayerSurfaceDamage(HWC_DISPLAY_PRIMARY,
+ // hardware_composer_layer_, vector here);
+
+ ALOGE_IF(ret, "HardwareComposer: Error settings layer surface damage : %d",
+ ret);
+#endif
+
+ surface_rect_functions_applied_ = true;
+ }
+}
+
// Records the HWC release fence for the posted buffer; it is handed to the
// buffer on the next Release (see Reset/Prepare).
void Layer::Finish(int release_fence_fd) {
  release_fence_.Reset(release_fence_fd);
}
+
// Discards the pending acquire fence when this layer's frame is dropped.
void Layer::Drop() { acquire_fence_fd_.Close(); }
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/hardware_composer.h b/libs/vr/libvrflinger/hardware_composer.h
new file mode 100644
index 0000000..cfe8c84
--- /dev/null
+++ b/libs/vr/libvrflinger/hardware_composer.h
@@ -0,0 +1,406 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
+
+#include <log/log.h>
+#include <hardware/gralloc.h>
+#include <hardware/hardware.h>
+#include <hardware/hwcomposer2.h>
+
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/sync_util.h>
+
+#include <array>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <tuple>
+#include <vector>
+
+#include <pdx/file_handle.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/frame_time_history.h>
+#include <private/dvr/sync_util.h>
+
+#include "acquired_buffer.h"
+#include "compositor.h"
+#include "display_surface.h"
+
+#include "DisplayHardware/ComposerHal.h"
+
+// Hardware composer HAL doesn't define HWC_TRANSFORM_NONE as of this writing.
+#ifndef HWC_TRANSFORM_NONE
+#define HWC_TRANSFORM_NONE static_cast<hwc_transform_t>(0)
+#endif
+
+namespace android {
+namespace dvr {
+
+// Basic display metrics for physical displays. Dimensions and densities are
+// relative to the physical display orientation, which may be different from the
+// logical display orientation exposed to applications.
+struct HWCDisplayMetrics {
+ // Panel dimensions in pixels, relative to the physical orientation.
+ int width;
+ int height;
+ // Pixel density in dots-per-inch along each axis.
+ struct {
+ int x;
+ int y;
+ } dpi;
+ // Interval between hardware vsync events, in nanoseconds.
+ int vsync_period_ns;
+};
+
+// Layer represents the connection between a hardware composer layer and the
+// source supplying buffers for the layer's contents.
+class Layer {
+ public:
+ Layer();
+
+ // Sets the hardware composer layer and display metrics that this Layer should
+ // use each Prepare cycle. This class does not own either of these pointers,
+ // which MUST remain valid for its lifetime. This method MUST be called once
+ // in the life of the instance before any other method is valid to call.
+ void Initialize(Hwc2::Composer* hwc2_hidl, HWCDisplayMetrics* metrics);
+
+ // Releases any shared pointers and fence handles held by this instance.
+ void Reset();
+
+ // Sets up the layer to use a display surface as its content source. The Layer
+ // will automatically handle ACQUIRE/RELEASE phases for the surface's buffer
+ // train every frame.
+ //
+ // |blending| receives HWC_BLENDING_* values.
+ // |transform| receives HWC_TRANSFORM_* values.
+ // |composition_type| receives either HWC_FRAMEBUFFER for most layers or
+ // HWC_FRAMEBUFFER_TARGET (unless you know what you are doing).
+ // |index| is the index of this surface in the DisplaySurface array.
+ void Setup(const std::shared_ptr<DisplaySurface>& surface,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t index);
+
+ // Sets up the layer to use a direct buffer as its content source. No special
+ // handling of the buffer is performed; responsibility for updating or
+ // changing the buffer each frame is on the caller.
+ //
+ // |blending| receives HWC_BLENDING_* values.
+ // |transform| receives HWC_TRANSFORM_* values.
+ // |composition_type| receives either HWC_FRAMEBUFFER for most layers or
+ // HWC_FRAMEBUFFER_TARGET (unless you know what you are doing).
+ void Setup(const std::shared_ptr<IonBuffer>& buffer,
+ hwc2_blend_mode_t blending, hwc_transform_t transform,
+ hwc2_composition_t composition_type, size_t z_order);
+
+ // Layers that use a direct IonBuffer should call this each frame to update
+ // which buffer will be used for the next PostLayers.
+ void UpdateDirectBuffer(const std::shared_ptr<IonBuffer>& buffer);
+
+ // Sets up the hardware composer layer for the next frame. When the layer is
+ // associated with a display surface, this method automatically ACQUIRES a new
+ // buffer if one is available.
+ void Prepare();
+
+ // After calling prepare, if this frame is to be dropped instead of passing
+ // along to the HWC, call Drop to close the contained fence(s).
+ void Drop();
+
+ // Performs fence bookkeeping after the frame has been posted to hardware
+ // composer.
+ void Finish(int release_fence_fd);
+
+ // Sets the blending for the layer. |blending| receives HWC_BLENDING_* values.
+ void SetBlending(hwc2_blend_mode_t blending);
+
+ // Sets the Z-order of this layer
+ void SetZOrderIndex(int surface_index);
+
+ // Gets the current IonBuffer associated with this layer. Ownership of the
+ // buffer DOES NOT pass to the caller and the pointer is not guaranteed to
+ // remain valid across calls to Layer::Setup(), Layer::Prepare(), or
+ // Layer::Reset(). YOU HAVE BEEN WARNED.
+ IonBuffer* GetBuffer();
+
+ hwc2_composition_t GetCompositionType() const { return composition_type_; }
+
+ hwc2_layer_t GetLayerHandle() const { return hardware_composer_layer_; }
+
+ bool UsesDirectBuffer() const { return direct_buffer_ != nullptr; }
+
+ // True once either content source (direct buffer or surface) is attached.
+ bool IsLayerSetup() const {
+ return direct_buffer_ != nullptr || surface_ != nullptr;
+ }
+
+ // Applies all of the settings to this layer using the hwc functions
+ void UpdateLayerSettings();
+
+ // Returns the id of the attached display surface, or -1 when this layer is
+ // backed by a direct buffer (or not set up at all).
+ int GetSurfaceId() const {
+ if (surface_ != nullptr) {
+ return surface_->surface_id();
+ } else {
+ return -1;
+ }
+ }
+
+ private:
+ // Shared setup applied by both Setup() overloads.
+ void CommonLayerSetup();
+
+ // Non-owning composer interface supplied via Initialize().
+ Hwc2::Composer* hwc2_hidl_;
+
+ // Original display surface array index for tracking purposes.
+ size_t surface_index_;
+
+ // The hardware composer layer and metrics to use during the prepare cycle.
+ hwc2_layer_t hardware_composer_layer_;
+ HWCDisplayMetrics* display_metrics_;
+
+ // Layer properties used to setup the hardware composer layer during the
+ // Prepare phase.
+ hwc2_blend_mode_t blending_;
+ hwc_transform_t transform_;
+ hwc2_composition_t composition_type_;
+
+ // These two members are mutually exclusive. When direct_buffer_ is set the
+ // Layer gets its contents directly from that buffer; when surface_ is set the
+ // Layer gets its contents from the surface's buffer train.
+ std::shared_ptr<IonBuffer> direct_buffer_;
+ std::shared_ptr<DisplaySurface> surface_;
+
+ // State when associated with a display surface.
+ AcquiredBuffer acquired_buffer_;
+ pdx::LocalHandle release_fence_;
+
+ // Fence to pass to HWC with the current buffer, plus a flag tracking whether
+ // the one-time source crop/damage setup has been applied (see Prepare path
+ // in hardware_composer.cpp).
+ pdx::LocalHandle acquire_fence_fd_;
+ bool surface_rect_functions_applied_;
+
+ Layer(const Layer&) = delete;
+ void operator=(const Layer&) = delete;
+};
+
+// HardwareComposer encapsulates the hardware composer HAL, exposing a
+// simplified API to post buffers to the display.
+class HardwareComposer {
+ public:
+ // Type for vsync callback.
+ // NOTE(review): the meanings of the four parameters are not visible in this
+ // header — confirm against the call site in hardware_composer.cpp.
+ using VSyncCallback = std::function<void(int, int64_t, int64_t, uint32_t)>;
+
+ // Since there is no universal way to query the number of hardware layers,
+ // just set it to 4 for now.
+ static constexpr int kMaxHardwareLayers = 4;
+
+ HardwareComposer();
+ // NOTE(review): |hidl| appears to be stored in the owning unique_ptr
+ // hwc2_hidl_ below, implying ownership transfer — confirm with callers.
+ HardwareComposer(Hwc2::Composer* hidl);
+ ~HardwareComposer();
+
+ // Pause/resume the post thread; see pause_post_thread_ below.
+ bool Suspend();
+ bool Resume();
+ bool IsSuspended() const { return pause_post_thread_; }
+
+ // Get the HMD display metrics for the current display.
+ DisplayMetrics GetHmdDisplayMetrics() const;
+
+ int32_t GetDisplayAttribute(hwc2_display_t display, hwc2_config_t config,
+ hwc2_attribute_t attributes,
+ int32_t* out_value) const;
+ int32_t GetDisplayMetrics(hwc2_display_t display, hwc2_config_t config,
+ HWCDisplayMetrics* out_metrics) const;
+ void Dump(char* buffer, uint32_t* out_size);
+
+ void SetVSyncCallback(VSyncCallback callback);
+
+ // Metrics of the logical display, which is always landscape.
+ int DisplayWidth() const { return display_metrics_.width; }
+ int DisplayHeight() const { return display_metrics_.height; }
+ HWCDisplayMetrics display_metrics() const { return display_metrics_; }
+
+ // Metrics of the native display, which depends on the specific hardware
+ // implementation of the display.
+ HWCDisplayMetrics native_display_metrics() const {
+ return native_display_metrics_;
+ }
+
+ std::shared_ptr<IonBuffer> framebuffer_target() const {
+ return framebuffer_target_;
+ }
+
+ // Set the display surface stack to compose to the display each frame.
+ int SetDisplaySurfaces(std::vector<std::shared_ptr<DisplaySurface>> surfaces);
+
+ Compositor* GetCompositor() { return &compositor_; }
+
+ private:
+ int32_t EnableVsync(bool enabled);
+ int32_t SetPowerMode(hwc2_display_t display, hwc2_power_mode_t mode);
+
+ // HIDL callback sink registered with the composer service; all events are
+ // currently no-ops.
+ class ComposerCallback : public Hwc2::IComposerCallback {
+ public:
+ ComposerCallback() {}
+
+ hardware::Return<void> onHotplug(Hwc2::Display /*display*/,
+ Connection /*connected*/) override {
+ // TODO(skiazyk): depending on how the server is implemented, we might
+ // have to set it up to synchronize with receiving this event, as it can
+ // potentially be a critical event for setting up state within the
+ // hwc2 module. That is, we (technically) should not call any other hwc
+ // methods until this method has been called after registering the
+ // callbacks.
+ return hardware::Void();
+ }
+
+ hardware::Return<void> onRefresh(Hwc2::Display /*display*/) override {
+ return hardware::Void();
+ }
+
+ hardware::Return<void> onVsync(Hwc2::Display /*display*/,
+ int64_t /*timestamp*/) override {
+ return hardware::Void();
+ }
+ };
+
+ int32_t Validate(hwc2_display_t display);
+ int32_t Present(hwc2_display_t display);
+
+ void SetBacklightBrightness(int brightness);
+
+ void PostLayers(bool is_geometry_changed);
+ void PostThread();
+
+ // Helpers used by the post thread to pace frames against vsync via the
+ // sysfs/timer fds declared below.
+ int ReadWaitPPState();
+ int BlockUntilVSync();
+ int ReadVSyncTimestamp(int64_t* timestamp);
+ int WaitForVSync(int64_t* timestamp);
+ int SleepUntil(int64_t wakeup_timestamp);
+
+ bool IsFramePendingInDriver() { return ReadWaitPPState() == 1; }
+
+ // Returns true if the layer config changed, false otherwise
+ bool UpdateLayerConfig(
+ std::vector<std::shared_ptr<DisplaySurface>>* compositor_surfaces);
+ void PostCompositorBuffers(
+ const std::vector<std::shared_ptr<DisplaySurface>>& compositor_surfaces);
+
+ void UpdateDisplayState();
+
+ // Pairs a frame start time with the fence that signals frame completion;
+ // move-only because it owns the fence handle.
+ struct FrameTimeMeasurementRecord {
+ int64_t start_time;
+ pdx::LocalHandle fence;
+
+ FrameTimeMeasurementRecord(FrameTimeMeasurementRecord&&) = default;
+ FrameTimeMeasurementRecord& operator=(FrameTimeMeasurementRecord&&) =
+ default;
+ FrameTimeMeasurementRecord(const FrameTimeMeasurementRecord&) = delete;
+ FrameTimeMeasurementRecord& operator=(const FrameTimeMeasurementRecord&) =
+ delete;
+ };
+
+ void UpdateFrameTimeHistory(std::vector<FrameTimeMeasurementRecord>* backlog,
+ int backlog_max,
+ FenceInfoBuffer* fence_info_buffer,
+ FrameTimeHistory* history);
+
+ // Returns true if the frame finished rendering, false otherwise. If the frame
+ // finished the frame end time is stored in timestamp. Doesn't block.
+ bool CheckFrameFinished(int frame_fence_fd,
+ FenceInfoBuffer* fence_info_buffer,
+ int64_t* timestamp);
+
+ void HandlePendingScreenshots();
+
+ void PausePostThread();
+
+ // Hardware composer HAL device.
+ std::unique_ptr<Hwc2::Composer> hwc2_hidl_;
+ sp<ComposerCallback> callbacks_;
+
+ // Display metrics of the physical display.
+ HWCDisplayMetrics native_display_metrics_;
+ // Display metrics of the logical display, adjusted so that orientation is
+ // landscape.
+ HWCDisplayMetrics display_metrics_;
+ // Transform required to get from native to logical display orientation.
+ hwc_transform_t display_transform_;
+
+ // Buffer for the background layer required by hardware composer.
+ std::shared_ptr<IonBuffer> framebuffer_target_;
+
+ // Protects access to the display surfaces and logical layers.
+ std::mutex layer_mutex_;
+
+ // Active display surfaces configured by the display manager.
+ std::vector<std::shared_ptr<DisplaySurface>> display_surfaces_;
+ std::vector<std::shared_ptr<DisplaySurface>> added_display_surfaces_;
+ bool display_surfaces_updated_;
+ bool hardware_layers_need_update_;
+
+ // Cache whether the display was turned on by us
+ bool display_on_; // TODO(hendrikw): The display is always on. Revisit.
+
+ // Layer array for handling buffer flow into hardware composer layers.
+ // Note that the first array is the actual storage for the layer objects,
+ // and the latter is an array of pointers, which can be freely re-arranged
+ // without messing up the underlying objects.
+ std::array<Layer, kMaxHardwareLayers> layer_storage_;
+ std::array<Layer*, kMaxHardwareLayers> layers_;
+ size_t active_layer_count_;
+
+ // Set by the Post thread to the index of the GPU compositing output
+ // buffer in the layers_ array.
+ Layer* gpu_layer_;
+
+ // Handler to hook vsync events outside of this class.
+ VSyncCallback vsync_callback_;
+
+ // Thread and condition for managing the layer posting thread. This thread
+ // wakes up a short time before vsync to hand buffers to post processing and
+ // the results to hardware composer.
+ std::thread post_thread_;
+
+ // Control variables to control the state of the post thread
+ pdx::LocalHandle terminate_post_thread_event_fd_;
+ bool pause_post_thread_;
+ std::mutex thread_pause_mutex_;
+ std::condition_variable thread_pause_semaphore_;
+
+ // Backlight LED brightness sysfs node.
+ pdx::LocalHandle backlight_brightness_fd_;
+
+ // Primary display vsync event sysfs node.
+ pdx::LocalHandle primary_display_vsync_event_fd_;
+
+ // Primary display wait_pingpong state sysfs node.
+ pdx::LocalHandle primary_display_wait_pp_fd_;
+
+ // VSync sleep timerfd.
+ pdx::LocalHandle vsync_sleep_timer_fd_;
+
+ // The timestamp of the last vsync.
+ int64_t last_vsync_timestamp_;
+
+ // Vsync count since display on.
+ uint32_t vsync_count_;
+
+ // Counter tracking the number of skipped frames.
+ int frame_skip_count_;
+
+ // After construction, only accessed on post_thread_.
+ Compositor compositor_;
+
+ // Fd array for tracking retire fences that are returned by hwc. This allows
+ // us to detect when the display driver begins queuing frames.
+ std::vector<pdx::LocalHandle> retire_fence_fds_;
+
+ // Pose client for frame count notifications. Pose client predicts poses
+ // out to display frame boundaries, so we need to tell it about vsyncs.
+ DvrPose* pose_client_;
+
+ // Static trampolines for the legacy HWC2 C callback API; presumably |data|
+ // carries the HardwareComposer instance — confirm in the .cpp.
+ static void HwcRefresh(hwc2_callback_data_t data, hwc2_display_t display);
+ static void HwcVSync(hwc2_callback_data_t data, hwc2_display_t display,
+ int64_t timestamp);
+ static void HwcHotplug(hwc2_callback_data_t callbackData,
+ hwc2_display_t display, hwc2_connection_t connected);
+
+ HardwareComposer(const HardwareComposer&) = delete;
+ void operator=(const HardwareComposer&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_HARDWARE_COMPOSER_H_
diff --git a/libs/vr/libvrflinger/include/dvr/vr_flinger.h b/libs/vr/libvrflinger/include/dvr/vr_flinger.h
new file mode 100644
index 0000000..04c8363
--- /dev/null
+++ b/libs/vr/libvrflinger/include/dvr/vr_flinger.h
@@ -0,0 +1,33 @@
+#ifndef ANDROID_DVR_VR_FLINGER_H_
+#define ANDROID_DVR_VR_FLINGER_H_
+
+#include <thread>
+#include <memory>
+
+namespace android {
+
+namespace Hwc2 {
+class Composer;
+} // namespace Hwc2
+
+namespace dvr {
+
+class DisplayService;
+
+// Entry point used by SurfaceFlinger to host the VR display service in-process
+// (see CL description). Run() spins up the display service thread.
+class VrFlinger {
+ public:
+ VrFlinger();
+ // Starts the service using the given composer interface; returns a status
+ // code. NOTE(review): ownership of |hidl| is not documented here — confirm
+ // against the implementation.
+ int Run(Hwc2::Composer* hidl);
+
+ // Switch the display service in/out of VR mode.
+ void EnterVrMode();
+ void ExitVrMode();
+
+ private:
+ // Thread running the display service dispatch loop.
+ std::thread displayd_thread_;
+ std::shared_ptr<android::dvr::DisplayService> display_service_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_VR_FLINGER_H_
diff --git a/libs/vr/libvrflinger/screenshot_service.cpp b/libs/vr/libvrflinger/screenshot_service.cpp
new file mode 100644
index 0000000..e174943
--- /dev/null
+++ b/libs/vr/libvrflinger/screenshot_service.cpp
@@ -0,0 +1,181 @@
+#include "screenshot_service.h"
+
+#include <utils/Trace.h>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/display_types.h>
+
+using android::pdx::Message;
+using android::pdx::MessageInfo;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+using android::pdx::rpc::RemoteMethodError;
+using android::pdx::rpc::RemoteMethodReturn;
+
+namespace android {
+namespace dvr {
+
+ScreenshotService::~ScreenshotService() { instance_ = nullptr; }
+
+// Dispatches screenshot RPC opcodes to their handlers; all other messages are
+// forwarded to the base Service implementation.
+int ScreenshotService::HandleMessage(pdx::Message& message) {
+ switch (message.GetOp()) {
+ case DisplayScreenshotRPC::GetFormat::Opcode:
+ DispatchRemoteMethod<DisplayScreenshotRPC::GetFormat>(
+ *this, &ScreenshotService::OnGetFormat, message);
+ return 0;
+
+ case DisplayScreenshotRPC::TakeScreenshot::Opcode:
+ DispatchRemoteMethod<DisplayScreenshotRPC::TakeScreenshot>(
+ *this, &ScreenshotService::OnTakeScreenshot, message);
+ return 0;
+
+ default:
+ return Service::HandleMessage(message);
+ }
+}
+
+// Reports the pixel format of returned screenshots; Take() below always emits
+// packed 24-bit RGB.
+int ScreenshotService::OnGetFormat(pdx::Message&) {
+ return HAL_PIXEL_FORMAT_RGB_888;
+}
+
+// Defers the reply: the message is parked in waiters_ and answered later by
+// TakeIfNeeded() on the post thread. The empty return value is never sent
+// because the message has been moved out.
+ScreenshotData ScreenshotService::OnTakeScreenshot(pdx::Message& message,
+ int layer_index) {
+ AddWaiter(std::move(message), layer_index);
+ return {};
+}
+
+// Queues a pending screenshot request; waiters_ is shared with the post
+// thread, hence the lock.
+void ScreenshotService::AddWaiter(pdx::Message&& message, int layer_index) {
+ std::lock_guard<std::mutex> lock(mutex_);
+ waiters_.emplace_back(std::move(message), layer_index);
+}
+
+// Services all queued screenshot requests against the current frame. Index
+// convention: 0 = composited output (unsupported), positive = hardware layer
+// (index - 1), negative = compositor input layer (-index - 1). Runs on the
+// post thread while it holds the current frame's buffers.
+void ScreenshotService::TakeIfNeeded(
+ std::array<Layer*, HardwareComposer::kMaxHardwareLayers>& hw_layers,
+ Compositor& compositor) {
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ // Send the buffer contents to all of our waiting clients.
+ for (auto& waiter : waiters_) {
+ if (waiter.IsDone())
+ continue;
+
+ if (waiter.layer_index() == 0) {
+ ALOGE(
+ "ScreenshotService::TakeIfNeeded: Capturing the composited display "
+ "output is not yet supported.");
+
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ if (waiter.layer_index() > 0) {
+ // Check for hardware layer screenshot requests.
+ // Hardware layers are requested with positive indices starting at 1.
+ const size_t layer_index = static_cast<size_t>(waiter.layer_index() - 1);
+
+ if (layer_index >= hw_layers.size()) {
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ auto buffer = hw_layers[layer_index]->GetBuffer();
+ if (!buffer) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ auto data = compositor.ReadBufferPixels(buffer);
+ if (data.empty()) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ // Stride equals width here: ReadBufferPixels returns tightly-packed
+ // rows for the buffer's dimensions.
+ Take(&waiter, data.data(), buffer->width(), buffer->height(),
+ buffer->width());
+ } else {
+ // Check for compositor input layer screenshot requests.
+ // Prewarp surfaces are requested with negative indices starting at -1.
+ const size_t layer_index = static_cast<size_t>(-waiter.layer_index() - 1);
+
+ if (layer_index >= compositor.GetLayerCount()) {
+ waiter.Error(EINVAL);
+ continue;
+ }
+
+ int width = 0;
+ int height = 0;
+ auto data = compositor.ReadLayerPixels(layer_index, &width, &height);
+ if (data.empty()) {
+ waiter.Error(ENOBUFS);
+ continue;
+ }
+
+ Take(&waiter, data.data(), width, height, width);
+ }
+ }
+
+ // Reply with error to requests that did not match up with a source layer.
+ // (Every branch above replies or errors, so this loop is a safety net in
+ // case a future branch forgets to respond.)
+ for (auto& waiter : waiters_) {
+ if (!waiter.IsDone())
+ waiter.Error(EAGAIN);
+ }
+ waiters_.clear();
+}
+
+// Sends the finished screenshot back over the parked RPC message; after this
+// the message is replied and IsDone() becomes true.
+void ScreenshotWaiter::Reply(const ScreenshotData& screenshot) {
+ ALOGI("Returning screenshot: size=%zu recv_size=%zu",
+ screenshot.buffer.size(), message_.GetReceiveLength());
+ RemoteMethodReturn<DisplayScreenshotRPC::TakeScreenshot>(message_,
+ screenshot);
+}
+
+void ScreenshotWaiter::Error(int error) { RemoteMethodError(message_, error); }
+
+// Converts the source RGBA pixels to packed RGB, rotating portrait sources to
+// landscape, and replies to |waiter| with the result. |buffer_stride| is the
+// source row stride in pixels, measured in the pre-rotation orientation.
+void ScreenshotService::Take(ScreenshotWaiter* waiter, const void* rgba_data,
+ int32_t width, int32_t height, int buffer_stride) {
+ ATRACE_NAME(__PRETTY_FUNCTION__);
+
+ // After this swap, width/height describe the landscape output; the source
+ // is still addressed through buffer_stride.
+ bool is_portrait = height > width;
+ if (is_portrait) {
+ std::swap(width, height);
+ }
+ int response_stride = width;
+
+ // Convert from RGBA to RGB and if in portrait, rotates to landscape; store
+ // the result in the response buffer.
+ ScreenshotData screenshot{width, height,
+ std::vector<uint8_t>(width * height * 3)};
+
+ const auto rgba_bytes = static_cast<const uint8_t*>(rgba_data);
+ for (int j = 0; j < height; ++j) {
+ for (int i = 0; i < width; ++i) {
+ // If the screenshot is in portrait mode, rotate into landscape mode.
+ const int response_index = is_portrait
+ ? (height - j - 1) * response_stride + i
+ : j * response_stride + i;
+ const int buffer_index =
+ is_portrait ? i * buffer_stride + j : j * buffer_stride + i;
+ // Copy R, G, B and drop the alpha byte.
+ screenshot.buffer[response_index * 3 + 0] =
+ rgba_bytes[buffer_index * 4 + 0];
+ screenshot.buffer[response_index * 3 + 1] =
+ rgba_bytes[buffer_index * 4 + 1];
+ screenshot.buffer[response_index * 3 + 2] =
+ rgba_bytes[buffer_index * 4 + 2];
+ }
+ }
+
+ waiter->Reply(screenshot);
+}
+
+// Creates the service endpoint at the screenshot RPC path and publishes this
+// instance via the GetInstance() singleton pointer.
+ScreenshotService::ScreenshotService()
+ : BASE("ScreenshotService",
+ Endpoint::Create(DisplayScreenshotRPC::kClientPath)) {
+ instance_ = this;
+}
+
+ScreenshotService* ScreenshotService::GetInstance() { return instance_; }
+
+ScreenshotService* ScreenshotService::instance_ = nullptr;
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/screenshot_service.h b/libs/vr/libvrflinger/screenshot_service.h
new file mode 100644
index 0000000..ec4c527
--- /dev/null
+++ b/libs/vr/libvrflinger/screenshot_service.h
@@ -0,0 +1,82 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
+
+#include <pdx/rpc/pointer_wrapper.h>
+#include <pdx/service.h>
+#include <private/dvr/ion_buffer.h>
+
+#include <mutex>
+#include <vector>
+
+#include "hardware_composer.h"
+
+namespace android {
+namespace dvr {
+
+// Holds one deferred screenshot RPC: the parked message plus the requested
+// source layer index. Move-only because it owns the in-flight message.
+class ScreenshotWaiter {
+ public:
+ explicit ScreenshotWaiter(pdx::Message&& message, int layer_index)
+ : message_(std::move(message)), layer_index_(layer_index) {}
+ ScreenshotWaiter(ScreenshotWaiter&&) = default;
+
+ // Completes the request with data or an errno-style failure code.
+ void Reply(const ScreenshotData& screenshot);
+ void Error(int error);
+
+ // True once the underlying message has been replied to.
+ bool IsDone() const { return message_.replied(); }
+ int layer_index() const { return layer_index_; }
+
+ private:
+ pdx::Message message_;
+ int layer_index_;
+
+ ScreenshotWaiter(const ScreenshotWaiter&) = delete;
+ void operator=(const ScreenshotWaiter&) = delete;
+};
+
+// The screenshot service allows clients to obtain screenshots from displayd.
+// The screenshot service allows clients to obtain screenshots from displayd.
+// Layer index convention (see TakeIfNeeded in the .cpp): 0 = composited
+// output (unsupported), positive = hardware layer index + 1, negative =
+// compositor input layer, -(index) - 1.
+class ScreenshotService : public pdx::ServiceBase<ScreenshotService> {
+ public:
+ ~ScreenshotService();
+
+ int HandleMessage(pdx::Message& message) override;
+
+ // Returns true if there is a pending screenshot request.
+ bool IsScreenshotRequestPending() const {
+ std::lock_guard<std::mutex> lock(mutex_);
+ return !waiters_.empty();
+ }
+
+ // If any clients are currently waiting for a screenshot, read back the
+ // contents of requested layers and send the resulting
+ // image to the clients.
+ void TakeIfNeeded(
+ std::array<Layer*, HardwareComposer::kMaxHardwareLayers>& hw_layers,
+ Compositor& compositor);
+
+ // Singleton accessor; returns nullptr when no instance is alive.
+ static ScreenshotService* GetInstance();
+
+ private:
+ friend BASE;
+
+ ScreenshotService();
+
+ // Parks a request until the post thread services it via TakeIfNeeded().
+ void AddWaiter(pdx::Message&& message, int layer_index);
+
+ ScreenshotData OnTakeScreenshot(pdx::Message& message, int index);
+ int OnGetFormat(pdx::Message& message);
+
+ // Copy the given screenshot data into the message reply.
+ void Take(ScreenshotWaiter* waiter, const void* rgba_data, int32_t width,
+ int32_t height, int buffer_stride);
+
+ static ScreenshotService* instance_;
+
+ // Protects access to subsequent member variables.
+ mutable std::mutex mutex_;
+ std::vector<ScreenshotWaiter> waiters_;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_SCREENSHOT_SERVICE_H_
diff --git a/libs/vr/libvrflinger/surface_channel.cpp b/libs/vr/libvrflinger/surface_channel.cpp
new file mode 100644
index 0000000..8aa220b
--- /dev/null
+++ b/libs/vr/libvrflinger/surface_channel.cpp
@@ -0,0 +1,44 @@
+#include "surface_channel.h"
+
+using android::pdx::BorrowedChannelHandle;
+using android::pdx::Message;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+// Handles the channel-level RPCs common to all surface types. NOTE(review):
+// unrecognized opcodes fall through and return 0 without replying — derived
+// classes are expected to handle their own ops before/after delegating here;
+// confirm against DisplaySurface::HandleMessage.
+int SurfaceChannel::HandleMessage(Message& message) {
+ switch (message.GetOp()) {
+ case DisplayRPC::GetMetadataBuffer::Opcode:
+ DispatchRemoteMethod<DisplayRPC::GetMetadataBuffer>(
+ *this, &SurfaceChannel::OnGetMetadataBuffer, message);
+ break;
+ }
+
+ return 0;
+}
+
+// Returns a borrowed handle to the (lazily created) metadata buffer channel,
+// or replies with -ENOMEM when allocation fails.
+BorrowedChannelHandle SurfaceChannel::OnGetMetadataBuffer(Message& message) {
+ if (EnsureMetadataBuffer()) {
+ return metadata_buffer_->GetChannelHandle().Borrow();
+ } else {
+ REPLY_ERROR_RETURN(message, -ENOMEM, {});
+ }
+}
+
+// Lazily allocates the single metadata blob buffer for this surface; a no-op
+// when the buffer already exists. Returns false if allocation fails.
+bool SurfaceChannel::EnsureMetadataBuffer() {
+ if (!metadata_buffer_) {
+ metadata_buffer_ =
+ BufferProducer::CreateUncachedBlob(metadata_size());
+ if (!metadata_buffer_) {
+ // Fix: the log tag previously said "DisplaySurface::EnsureMetadataBuffer"
+ // even though this method lives on SurfaceChannel, which misdirects log
+ // triage to the wrong class.
+ ALOGE(
+ "SurfaceChannel::EnsureMetadataBuffer: could not allocate metadata "
+ "buffer");
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/surface_channel.h b/libs/vr/libvrflinger/surface_channel.h
new file mode 100644
index 0000000..870e1a4
--- /dev/null
+++ b/libs/vr/libvrflinger/surface_channel.h
@@ -0,0 +1,63 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
+
+#include <pdx/service.h>
+#include <private/dvr/buffer_hub_client.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+// Base channel object for display surfaces: owns the per-surface identity
+// (id, type, metadata size) and the lazily-allocated metadata blob buffer
+// shared with the client.
+class SurfaceChannel : public pdx::Channel {
+ public:
+ // |service| is not owned and must outlive this channel.
+ SurfaceChannel(DisplayService* service, int channel_id, SurfaceType type,
+ size_t metadata_size)
+ : service_(service),
+ surface_id_(channel_id),
+ type_(type),
+ metadata_size_(metadata_size) {}
+
+ ~SurfaceChannel() override = default;
+
+ DisplayService* service() const { return service_; }
+ int surface_id() const { return surface_id_; }
+ SurfaceType type() const { return type_; }
+ size_t metadata_size() const { return metadata_size_; }
+
+ // Returns the blob fd of the metadata buffer, allocating it on first use;
+ // returns an empty handle when allocation fails.
+ pdx::LocalHandle GetMetadataBufferFd() {
+ return EnsureMetadataBuffer() ? metadata_buffer_->GetBlobFd()
+ : pdx::LocalHandle{};
+ }
+
+ // Dispatches surface channel messages to the appropriate handlers. This
+ // handler runs on the displayd message dispatch thread.
+ virtual int HandleMessage(pdx::Message& message);
+
+ protected:
+ // Contains the surface metadata.
+ std::shared_ptr<BufferProducer> metadata_buffer_;
+
+ // Returns the metadata buffer for this surface. The first call allocates the
+ // buffer, while subsequent calls return the same buffer.
+ pdx::BorrowedChannelHandle OnGetMetadataBuffer(pdx::Message& message);
+
+ // Allocates the single metadata buffer for this surface unless it is already
+ // allocated. Idempotent when called multiple times.
+ bool EnsureMetadataBuffer();
+
+ private:
+ DisplayService* service_;
+ int surface_id_;
+ SurfaceType type_;
+ size_t metadata_size_;
+
+ SurfaceChannel(const SurfaceChannel&) = delete;
+ void operator=(const SurfaceChannel&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_SURFACE_CHANNEL_H_
diff --git a/libs/vr/libvrflinger/video_compositor.cpp b/libs/vr/libvrflinger/video_compositor.cpp
new file mode 100644
index 0000000..6b39a3c
--- /dev/null
+++ b/libs/vr/libvrflinger/video_compositor.cpp
@@ -0,0 +1,129 @@
+#include "video_compositor.h"
+
+#include <EGL/eglext.h>
+#include <GLES2/gl2ext.h>
+
+#include <private/dvr/debug.h>
+#include <private/dvr/display_rpc.h>
+
+namespace android {
+namespace dvr {
+
+// Wraps a buffer consumer for later binding as an external GL texture; the
+// EGLImage/texture are created lazily in EnsureTextureReady().
+VideoCompositor::Texture::Texture(
+ EGLDisplay display, const std::shared_ptr<BufferConsumer>& buffer_consumer)
+ : display_(display),
+ image_(EGL_NO_IMAGE_KHR),
+ texture_id_(0),
+ buffer_consumer_(buffer_consumer) {}
+
+// Destroys the EGLImage and GL texture if they were ever created.
+VideoCompositor::Texture::~Texture() {
+ if (image_ != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(display_, image_);
+ if (texture_id_ != 0)
+ glDeleteTextures(1, &texture_id_);
+}
+
+// Lazily creates an EGLImage over the consumer buffer and binds it to a new
+// external GL texture; subsequent calls return the cached texture id. Returns
+// 0 on failure. Must be called with the compositor's GL context current.
+GLuint VideoCompositor::Texture::EnsureTextureReady() {
+ // Compare explicitly against EGL_NO_IMAGE_KHR for consistency with the
+ // destructor, rather than relying on EGL_NO_IMAGE_KHR being zero.
+ if (image_ == EGL_NO_IMAGE_KHR) {
+ native_buffer_ = new NativeBuffer(buffer_consumer_);
+ CHECK_GL();
+
+ image_ = eglCreateImageKHR(
+ display_, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ static_cast<ANativeWindowBuffer*>(native_buffer_.get()), nullptr);
+ if (image_ == EGL_NO_IMAGE_KHR) {
+ ALOGE("Failed to create EGLImage.");
+ return 0;
+ }
+
+ glGenTextures(1, &texture_id_);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id_);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image_);
+ // Linear filtering, clamped edges — standard settings for video frames.
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
+ CHECK_GL();
+ }
+
+ return texture_id_;
+}
+
+// Returns the consumer buffer to bufferhub (with no release fence) so the
+// producer can gain it again; failures are logged but not propagated.
+void VideoCompositor::Texture::Release() {
+ const int ret = buffer_consumer_->Release({});
+ if (ret < 0) {
+ ALOGE(
+ "VideoCompositor::Texture::Release: Failed to release buffer, error: "
+ "%s",
+ strerror(-ret));
+ }
+}
+
+// Binds this compositor to a video mesh surface and its consumer queue.
+// NOTE(review): transform_metadata_ is stored but not read by any method
+// visible in this file (GetTransform reads the surface's own metadata) —
+// confirm whether it is still needed.
+VideoCompositor::VideoCompositor(
+ const std::shared_ptr<VideoMeshSurface>& surface,
+ const volatile DisplaySurfaceMetadata* display_surface_metadata)
+ : surface_(surface),
+ consumer_queue_(surface->GetConsumerQueue()),
+ transform_metadata_(display_surface_metadata),
+ active_texture_slot_(-1) {}
+
+// Drains the consumer queue (non-blocking dequeues) to find the most recent
+// video frame, releasing superseded buffers, and returns the GL texture id
+// for the active frame (0 if none yet). Must run in displayd's GL context.
+GLuint VideoCompositor::GetActiveTextureId(EGLDisplay display) {
+ size_t slot;
+ VideoMeshSurfaceBufferMetadata metadata;
+
+ while (true) {
+ // A naive way to pick the active texture: always dequeue all buffers from
+ // the queue until it's empty. This works well as long as video frames are
+ // queued in order from the producer side.
+ // TODO(jwcai) Use |metadata.timestamp_ns| to schedule video frames
+ // accurately.
+ auto buffer_consumer = consumer_queue_->Dequeue(0, &slot, &metadata);
+
+ if (buffer_consumer) {
+ // Create a new texture if it hasn't been created yet, or the same slot
+ // has a new |buffer_consumer|.
+ if (textures_[slot] == nullptr ||
+ textures_[slot]->event_fd() != buffer_consumer->event_fd()) {
+ textures_[slot] =
+ std::unique_ptr<Texture>(new Texture(display, buffer_consumer));
+ }
+
+ if (active_texture_slot_ != static_cast<int>(slot)) {
+ if (active_texture_slot_ >= 0) {
+ // Release the last active texture and move on to use the new one.
+ textures_[active_texture_slot_]->Release();
+ }
+ active_texture_slot_ = slot;
+ }
+ } else {
+ break;
+ }
+ }
+
+ if (active_texture_slot_ < 0) {
+ // No texture is active yet.
+ return 0;
+ }
+
+ return textures_[active_texture_slot_]->EnsureTextureReady();
+}
+
+// Copies the per-eye, per-render-buffer 4x4 transform out of the surface's
+// shared metadata buffer. The element-wise copy is deliberate: the source is
+// volatile shared memory, so it cannot be read with a bulk assignment.
+mat4 VideoCompositor::GetTransform(int eye, size_t render_buffer_index) {
+ volatile const VideoMeshSurfaceMetadata* transform_metadata =
+ surface_->GetMetadataBufferPtr();
+
+ mat4 screen_transform;
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ screen_transform(i, j) =
+ transform_metadata->transform[render_buffer_index][eye].val[i][j];
+ }
+ }
+
+ return screen_transform;
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/video_compositor.h b/libs/vr/libvrflinger/video_compositor.h
new file mode 100644
index 0000000..d0e72e1
--- /dev/null
+++ b/libs/vr/libvrflinger/video_compositor.h
@@ -0,0 +1,84 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <private/dvr/buffer_hub_queue_core.h>
+#include <private/dvr/types.h>
+
+#include <vector>
+
+#include "display_surface.h"
+#include "video_mesh_surface.h"
+
+namespace android {
+namespace dvr {
+
+using pdx::LocalHandle;
+
+// Manages video buffer consumers, texture mapping, and playback timing.
+class VideoCompositor {
+ public:
+  // |surface| supplies the buffer queue and transform metadata;
+  // |display_surface_metadata| points at the parent display surface's shared
+  // metadata block.
+  VideoCompositor(
+      const std::shared_ptr<VideoMeshSurface>& surface,
+      const volatile DisplaySurfaceMetadata* display_surface_metadata);
+
+  // Id of the underlying video mesh surface, or -1 if there is none.
+  int surface_id() const { return surface_ ? surface_->surface_id() : -1; }
+
+  // Returns a GL texture id that should be composited by displayd during the
+  // current rendering loop, or 0 if no frame has been dequeued yet. Note that
+  // this function must be called in displayd's GL context.
+  GLuint GetActiveTextureId(EGLDisplay display);
+
+  // Returns a basic video mesh transform for |eye| and |render_buffer_index|.
+  mat4 GetTransform(int eye, size_t render_buffer_index);
+
+ private:
+  // Wraps one dequeued buffer consumer as an EGLImage-backed GL texture.
+  class Texture {
+   public:
+    Texture(EGLDisplay display,
+            const std::shared_ptr<BufferConsumer>& buffer_consumer);
+    ~Texture();
+
+    // Returns the |event_fd| of the underlying buffer consumer. Caller can use
+    // this to decide whether the Texture needs to be recreated for a different
+    // buffer consumer.
+    int event_fd() const { return buffer_consumer_->event_fd(); }
+
+    // Method to map a dvr::BufferConsumer to a GL texture within the current GL
+    // context. If the current Texture object's |image_| hasn't been
+    // initialized, the method will do so based on the |buffer_consumer| and a
+    // new GL texture will be generated, cached, and returned. Otherwise, the
+    // cached |texture_id_| will be returned directly.
+    GLuint EnsureTextureReady();
+
+    // Signal bufferhub that the texture is done rendering so that the buffer
+    // can be re-gained by the producer for future use.
+    void Release();
+
+   private:
+    using NativeBuffer = BufferHubQueueCore::NativeBuffer;
+
+    EGLDisplay display_;
+    EGLImageKHR image_;
+    GLuint texture_id_;
+    sp<NativeBuffer> native_buffer_;
+    std::shared_ptr<BufferConsumer> buffer_consumer_;
+  };
+
+  std::shared_ptr<VideoMeshSurface> surface_;
+  std::shared_ptr<ConsumerQueue> consumer_queue_;
+  // Per-slot texture cache, indexed by buffer queue slot.
+  std::array<std::unique_ptr<Texture>, BufferHubQueue::kMaxQueueCapacity>
+      textures_;
+
+  // NOTE(review): not read by the visible implementation — GetTransform()
+  // fetches metadata from |surface_| instead. Confirm before removing.
+  const volatile DisplaySurfaceMetadata* transform_metadata_;
+  // Slot of the currently active texture, or -1 before the first frame.
+  int active_texture_slot_;
+
+  VideoCompositor(const VideoCompositor&) = delete;
+  void operator=(const VideoCompositor&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_COMPOSITOR_H_
diff --git a/libs/vr/libvrflinger/video_mesh_surface.cpp b/libs/vr/libvrflinger/video_mesh_surface.cpp
new file mode 100644
index 0000000..a961a3d
--- /dev/null
+++ b/libs/vr/libvrflinger/video_mesh_surface.cpp
@@ -0,0 +1,59 @@
+#include "video_mesh_surface.h"
+
+#include <private/dvr/display_rpc.h>
+
+using android::pdx::LocalChannelHandle;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+// Registers this surface with |service| as a VideoMesh-type channel whose
+// shared metadata buffer holds a VideoMeshSurfaceMetadata record.
+VideoMeshSurface::VideoMeshSurface(DisplayService* service, int surface_id)
+    : SurfaceChannel(service, surface_id, SurfaceTypeEnum::VideoMesh,
+                     sizeof(VideoMeshSurfaceMetadata)) {}
+
+// Out-of-line defaulted destructor; nothing to clean up beyond the members.
+VideoMeshSurface::~VideoMeshSurface() = default;
+
+// Handles the ops this surface type adds; everything else is delegated to
+// the base SurfaceChannel handler.
+int VideoMeshSurface::HandleMessage(Message& message) {
+  ATRACE_NAME("VideoMeshSurface::HandleMessage");
+
+  if (message.GetOp() ==
+      DisplayRPC::VideoMeshSurfaceCreateProducerQueue::Opcode) {
+    DispatchRemoteMethod<DisplayRPC::VideoMeshSurfaceCreateProducerQueue>(
+        *this, &VideoMeshSurface::OnCreateProducerQueue, message);
+    return 0;
+  }
+
+  return SurfaceChannel::HandleMessage(message);
+}
+
+// Returns the consumer end of the buffer queue. The queue only exists once
+// the client has asked us to create the producer end; log loudly if it is
+// requested before that (the caller receives nullptr in that case).
+std::shared_ptr<ConsumerQueue> VideoMeshSurface::GetConsumerQueue() {
+  if (consumer_queue_ == nullptr) {
+    ALOGE(
+        "VideoMeshSurface::GetConsumerQueue: consumer_queue is uninitialized.");
+  }
+  return consumer_queue_;
+}
+
+// Creates the producer buffer queue exactly once and returns its channel
+// handle for transport to the client; the consumer end is kept locally in
+// |consumer_queue_|. Replies with EALREADY if called twice and ENOMEM if the
+// queue cannot be allocated.
+LocalChannelHandle VideoMeshSurface::OnCreateProducerQueue(Message& message) {
+  ATRACE_NAME("VideoMeshSurface::OnCreateProducerQueue");
+
+  if (consumer_queue_ != nullptr) {
+    ALOGE(
+        "VideoMeshSurface::OnCreateProducerQueue: A ProducerQueue has already "
+        "been created and transported to VideoMeshSurfaceClient.");
+    REPLY_ERROR_RETURN(message, EALREADY, {});
+  }
+
+  auto producer = ProducerQueue::Create<VideoMeshSurfaceBufferMetadata>();
+  if (producer == nullptr) {
+    ALOGE(
+        "VideoMeshSurface::OnCreateProducerQueue: Failed to create producer "
+        "queue.");
+    REPLY_ERROR_RETURN(message, ENOMEM, {});
+  }
+
+  consumer_queue_ = producer->CreateConsumerQueue();
+
+  return std::move(producer->GetChannelHandle());
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/video_mesh_surface.h b/libs/vr/libvrflinger/video_mesh_surface.h
new file mode 100644
index 0000000..1370793
--- /dev/null
+++ b/libs/vr/libvrflinger/video_mesh_surface.h
@@ -0,0 +1,52 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
+
+#include <private/dvr/buffer_hub_queue_client.h>
+
+#include "surface_channel.h"
+
+namespace android {
+namespace dvr {
+
+class DisplayService;
+
+// VideoMeshSurface takes three inputs: 1) buffers filled by Android system
+// components (e.g. MediaCodec or camera stack) other than applications' GL
+// context; 2) a 3D mesh chosen by application to define the shape of the
+// surface; 3) a transformation matrix from application to define the rotation,
+// position, and scaling of the video surface.
+class VideoMeshSurface : public SurfaceChannel {
+ public:
+  using Message = pdx::Message;
+  using LocalChannelHandle = pdx::LocalChannelHandle;
+
+  // NOTE(review): the out-of-line definition names this parameter
+  // |surface_id| and forwards it to SurfaceChannel as a surface id; consider
+  // aligning the names.
+  VideoMeshSurface(DisplayService* service, int channel_id);
+  ~VideoMeshSurface() override;
+
+  // Maps the shared metadata blob (creating it on demand via
+  // EnsureMetadataBuffer()) and returns a pointer to it, or nullptr if the
+  // buffer could not be set up. Volatile because the blob is presumably
+  // written by the remote client — confirm against the client library.
+  volatile const VideoMeshSurfaceMetadata* GetMetadataBufferPtr() {
+    if (EnsureMetadataBuffer()) {
+      void* addr = nullptr;
+      metadata_buffer_->GetBlobReadWritePointer(metadata_size(), &addr);
+      return static_cast<const volatile VideoMeshSurfaceMetadata*>(addr);
+    } else {
+      return nullptr;
+    }
+  }
+
+  // Handles VideoMeshSurfaceCreateProducerQueue; all other ops fall through
+  // to SurfaceChannel.
+  int HandleMessage(Message& message) override;
+
+  // Returns the consumer end of the buffer queue; null (with an error log)
+  // if the producer queue has not been created yet.
+  std::shared_ptr<ConsumerQueue> GetConsumerQueue();
+
+ private:
+  // Creates the producer queue once and returns its channel handle for
+  // transport to the client; keeps the consumer end in |consumer_queue_|.
+  LocalChannelHandle OnCreateProducerQueue(Message& message);
+
+  std::shared_ptr<ConsumerQueue> consumer_queue_;
+
+  VideoMeshSurface(const VideoMeshSurface&) = delete;
+  void operator=(const VideoMeshSurface&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VIDEO_MESH_SURFACE_H_
diff --git a/libs/vr/libvrflinger/vr_flinger.cpp b/libs/vr/libvrflinger/vr_flinger.cpp
new file mode 100644
index 0000000..07f36a4
--- /dev/null
+++ b/libs/vr/libvrflinger/vr_flinger.cpp
@@ -0,0 +1,107 @@
+#include <dvr/vr_flinger.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <memory>
+
+#include <binder/ProcessState.h>
+#include <log/log.h>
+#include <cutils/properties.h>
+#include <cutils/sched_policy.h>
+#include <private/dvr/display_client.h>
+#include <sys/resource.h>
+
+#include <pdx/default_transport/service_dispatcher.h>
+
+#include <functional>
+
+#include "DisplayHardware/ComposerHal.h"
+#include "display_manager_service.h"
+#include "display_service.h"
+#include "screenshot_service.h"
+#include "vsync_service.h"
+
+namespace android {
+namespace dvr {
+
+// Out-of-line defaulted constructor; members are set up in Run().
+VrFlinger::VrFlinger() = default;
+
+// Starts all VrFlinger services and spawns the dispatch thread.
+// |hidl| is the hardware composer HAL wrapper handed in by the host process.
+// Returns NO_ERROR on success or -1 if any service failed to start.
+int VrFlinger::Run(Hwc2::Composer* hidl) {
+  std::shared_ptr<android::pdx::Service> service;
+
+  ALOGI("Starting up VrFlinger...");
+
+  // Run at urgent-display priority in the foreground scheduling group.
+  setpriority(PRIO_PROCESS, 0, android::PRIORITY_URGENT_DISPLAY);
+  set_sched_policy(0, SP_FOREGROUND);
+
+  // We need to be able to create endpoints with full perms.
+  umask(0000);
+
+  android::ProcessState::self()->startThreadPool();
+
+  // NOTE: CHECK_ERROR (defined elsewhere) is assumed to log the message and
+  // jump to the |error| label below when the condition is true — confirm.
+  std::shared_ptr<android::pdx::ServiceDispatcher> dispatcher =
+      android::pdx::default_transport::ServiceDispatcher::Create();
+  CHECK_ERROR(!dispatcher, error, "Failed to create service dispatcher.");
+
+  // Keep a reference to the display service so EnterVrMode()/ExitVrMode()
+  // can toggle it later.
+  display_service_ = android::dvr::DisplayService::Create(hidl);
+  CHECK_ERROR(!display_service_, error, "Failed to create display service.");
+  dispatcher->AddService(display_service_);
+
+  service = android::dvr::DisplayManagerService::Create(display_service_);
+  CHECK_ERROR(!service, error, "Failed to create display manager service.");
+  dispatcher->AddService(service);
+
+  service = android::dvr::ScreenshotService::Create();
+  CHECK_ERROR(!service, error, "Failed to create screenshot service.");
+  dispatcher->AddService(service);
+
+  service = android::dvr::VSyncService::Create();
+  CHECK_ERROR(!service, error, "Failed to create vsync service.");
+  dispatcher->AddService(service);
+
+  // Forward vsync events from the display service to the vsync service,
+  // which fans them out to its clients. |service| still holds the
+  // VSyncService created just above.
+  display_service_->SetVSyncCallback(
+      std::bind(&android::dvr::VSyncService::VSyncEvent,
+                std::static_pointer_cast<android::dvr::VSyncService>(service),
+                std::placeholders::_1, std::placeholders::_2,
+                std::placeholders::_3, std::placeholders::_4));
+
+  // All registered services are dispatched from one dedicated thread.
+  displayd_thread_ = std::thread([this, dispatcher]() {
+    ALOGI("Entering message loop.");
+
+    int ret = dispatcher->EnterDispatchLoop();
+    if (ret < 0) {
+      ALOGE("Dispatch loop exited because: %s\n", strerror(-ret));
+    }
+  });
+
+  return NO_ERROR;
+
+error:
+  display_service_.reset();
+
+  return -1;
+}
+
+// Activates the display service, handing display control to VrFlinger.
+void VrFlinger::EnterVrMode() {
+  if (!display_service_) {
+    ALOGE("Failed to enter VR mode : Display service is not started.");
+    return;
+  }
+  display_service_->SetActive(true);
+}
+
+// Deactivates the display service, releasing display control.
+void VrFlinger::ExitVrMode() {
+  if (!display_service_) {
+    ALOGE("Failed to exit VR mode : Display service is not started.");
+    return;
+  }
+  display_service_->SetActive(false);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/vsync_service.cpp b/libs/vr/libvrflinger/vsync_service.cpp
new file mode 100644
index 0000000..48fa2c2
--- /dev/null
+++ b/libs/vr/libvrflinger/vsync_service.cpp
@@ -0,0 +1,208 @@
+#include "vsync_service.h"
+
+#include <log/log.h>
+#include <hardware/hwcomposer.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <utils/Trace.h>
+
+#include <pdx/default_transport/service_endpoint.h>
+#include <private/dvr/clock_ns.h>
+#include <private/dvr/display_rpc.h>
+#include <private/dvr/display_types.h>
+
+using android::pdx::Channel;
+using android::pdx::Message;
+using android::pdx::MessageInfo;
+using android::pdx::default_transport::Endpoint;
+using android::pdx::rpc::DispatchRemoteMethod;
+
+namespace android {
+namespace dvr {
+
+// Creates the service endpoint and zero-initializes the vsync bookkeeping;
+// real values arrive with the first VSyncEvent().
+VSyncService::VSyncService()
+    : BASE("VSyncService", Endpoint::Create(DisplayVSyncRPC::kClientPath)),
+      last_vsync_(0),
+      current_vsync_(0),
+      compositor_time_ns_(0),
+      current_vsync_count_(0) {}
+
+// Out-of-line defaulted destructor; waiters and clients clean themselves up.
+VSyncService::~VSyncService() = default;
+
+// Records a hardware vsync event and wakes everyone interested in it.
+void VSyncService::VSyncEvent(int display, int64_t timestamp_ns,
+                              int64_t compositor_time_ns,
+                              uint32_t vsync_count) {
+  ATRACE_NAME("VSyncService::VSyncEvent");
+  std::lock_guard<std::mutex> autolock(mutex_);
+
+  // Only the primary display drives vsync; ignore events from others.
+  if (display != HWC_DISPLAY_PRIMARY)
+    return;
+
+  last_vsync_ = current_vsync_;
+  current_vsync_ = timestamp_ns;
+  compositor_time_ns_ = compositor_time_ns;
+  current_vsync_count_ = vsync_count;
+
+  // Reply to one-shot waiters first, then pulse the persistent channels.
+  NotifyWaiters();
+  UpdateClients();
+}
+
+// Creates a per-client channel context and registers it for vsync signals.
+std::shared_ptr<Channel> VSyncService::OnChannelOpen(pdx::Message& message) {
+  const MessageInfo& info = message.GetInfo();
+  auto channel = std::make_shared<VSyncChannel>(*this, info.pid, info.cid);
+  AddClient(channel);
+  return channel;
+}
+
+// Stops tracking a departing client; a null channel indicates an internal
+// error and is only logged.
+void VSyncService::OnChannelClose(pdx::Message& /*message*/,
+                                  const std::shared_ptr<Channel>& channel) {
+  if (auto client = std::static_pointer_cast<VSyncChannel>(channel)) {
+    RemoveClient(client);
+  } else {
+    ALOGW("WARNING: VSyncChannel was NULL!!!\n");
+  }
+}
+
+// Parks |message| until the next vsync; the reply is sent by NotifyWaiters().
+void VSyncService::AddWaiter(pdx::Message& message) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  waiters_.push_back(std::unique_ptr<VSyncWaiter>(new VSyncWaiter(message)));
+}
+
+// Tracks the channel so UpdateClients() can signal it on every vsync.
+void VSyncService::AddClient(const std::shared_ptr<VSyncChannel>& client) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  clients_.push_back(client);
+}
+
+// Drops the channel from the signal list when its client disconnects.
+void VSyncService::RemoveClient(const std::shared_ptr<VSyncChannel>& client) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  clients_.remove(client);
+}
+
+// Private. Assumes mutex is held.
+// Replies to every parked Wait message with the current vsync timestamp,
+// then drops the waiters: each one is one-shot.
+void VSyncService::NotifyWaiters() {
+  ATRACE_NAME("VSyncService::NotifyWaiters");
+
+  for (auto& waiter : waiters_)
+    waiter->Notify(current_vsync_);
+  waiters_.clear();
+}
+
+// Private. Assumes mutex is held.
+// Pulses every registered client channel to announce the new vsync.
+void VSyncService::UpdateClients() {
+  ATRACE_NAME("VSyncService::UpdateClients");
+
+  for (const auto& client : clients_)
+    client->Signal();
+}
+
+// Dispatches the vsync RPC ops; unknown ops fall through to the base service.
+int VSyncService::HandleMessage(pdx::Message& message) {
+  switch (message.GetOp()) {
+    // Wait has no immediate reply: the message is parked until the next
+    // vsync, when NotifyWaiters() responds with the timestamp.
+    case DisplayVSyncRPC::Wait::Opcode:
+      AddWaiter(message);
+      return 0;
+
+    case DisplayVSyncRPC::GetLastTimestamp::Opcode:
+      DispatchRemoteMethod<DisplayVSyncRPC::GetLastTimestamp>(
+          *this, &VSyncService::OnGetLastTimestamp, message);
+      return 0;
+
+    case DisplayVSyncRPC::GetSchedInfo::Opcode:
+      DispatchRemoteMethod<DisplayVSyncRPC::GetSchedInfo>(
+          *this, &VSyncService::OnGetSchedInfo, message);
+      return 0;
+
+    case DisplayVSyncRPC::Acknowledge::Opcode:
+      DispatchRemoteMethod<DisplayVSyncRPC::Acknowledge>(
+          *this, &VSyncService::OnAcknowledge, message);
+      return 0;
+
+    default:
+      return Service::HandleMessage(message);
+  }
+}
+
+// Returns the most recent vsync timestamp to the client.
+int64_t VSyncService::OnGetLastTimestamp(pdx::Message& message) {
+  auto client = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+  std::lock_guard<std::mutex> autolock(mutex_);
+
+  // Getting the timestamp has the side effect of ACKing.
+  client->Ack();
+  return current_vsync_;
+}
+
+// Computes scheduling info for a client: the estimated vsync period, the
+// next warp point (the moment the compositor preempts the GPU before the
+// next vsync), and the index of that vsync.
+VSyncSchedInfo VSyncService::OnGetSchedInfo(pdx::Message& message) {
+  auto client = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+  std::lock_guard<std::mutex> autolock(mutex_);
+
+  // Getting the timestamp has the side effect of ACKing.
+  client->Ack();
+
+  uint32_t next_vsync_count = current_vsync_count_ + 1;
+  int64_t current_time = GetSystemClockNs();
+  int64_t vsync_period_ns = 0;
+  int64_t next_warp;
+  if (current_vsync_ == 0 || last_vsync_ == 0) {
+    // Handle startup when current_vsync_ or last_vsync_ are 0.
+    // Normally should not happen because vsync_service is running before
+    // applications, but in case it does a sane time prevents applications
+    // from malfunctioning.
+    vsync_period_ns = 20000000;  // 20 ms fallback period.
+    next_warp = current_time;
+  } else {
+    // TODO(jbates) When we have an accurate reading of the true vsync
+    // period, use that instead of this estimated value.
+    vsync_period_ns = current_vsync_ - last_vsync_;
+    // Clamp the period, because when there are no surfaces the last_vsync_
+    // value will get stale. Note this is temporary and goes away as soon
+    // as we have an accurate vsync period reported by the system.
+    vsync_period_ns = std::min(vsync_period_ns, INT64_C(20000000));
+    // The warp point precedes the next vsync by the compositor's lead time.
+    next_warp = current_vsync_ + vsync_period_ns - compositor_time_ns_;
+    // If the request missed the present window, move up to the next vsync.
+    if (current_time > next_warp) {
+      next_warp += vsync_period_ns;
+      ++next_vsync_count;
+    }
+  }
+
+  return {vsync_period_ns, next_warp, next_vsync_count};
+}
+
+// Clears the client's pending vsync signal without reading any state.
+int VSyncService::OnAcknowledge(pdx::Message& message) {
+  auto channel = std::static_pointer_cast<VSyncChannel>(message.GetChannel());
+  std::lock_guard<std::mutex> lock(mutex_);
+  channel->Ack();
+  return 0;
+}
+
+// Records |timestamp| and replies to the parked Wait message with it via
+// OnWait().
+void VSyncWaiter::Notify(int64_t timestamp) {
+  timestamp_ = timestamp;
+  DispatchRemoteMethod<DisplayVSyncRPC::Wait>(*this, &VSyncWaiter::OnWait,
+                                              message_);
+}
+
+// Payload for the Wait reply: the vsync timestamp recorded by Notify().
+int64_t VSyncWaiter::OnWait(pdx::Message& /*message*/) { return timestamp_; }
+
+// Clears POLLPRI on the client's channel, consuming the pending vsync signal.
+void VSyncChannel::Ack() {
+  ALOGD_IF(TRACE, "VSyncChannel::Ack: pid=%d cid=%d\n", pid_, cid_);
+  service_.ModifyChannelEvents(cid_, POLLPRI, 0);
+}
+
+// Raises POLLPRI on the client's channel to announce a new vsync.
+void VSyncChannel::Signal() {
+  ALOGD_IF(TRACE, "VSyncChannel::Signal: pid=%d cid=%d\n", pid_, cid_);
+  service_.ModifyChannelEvents(cid_, 0, POLLPRI);
+}
+
+} // namespace dvr
+} // namespace android
diff --git a/libs/vr/libvrflinger/vsync_service.h b/libs/vr/libvrflinger/vsync_service.h
new file mode 100644
index 0000000..ba1d4df
--- /dev/null
+++ b/libs/vr/libvrflinger/vsync_service.h
@@ -0,0 +1,107 @@
+#ifndef ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_
+#define ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_
+
+#include <pdx/service.h>
+
+#include <list>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include "display_service.h"
+
+namespace android {
+namespace dvr {
+
+// VSyncWaiter encapsulates a client blocked waiting for the next vsync.
+// It is used to enqueue the Message to reply to when the next vsync event
+// occurs. Takes ownership of the message; Notify() sends the reply.
+class VSyncWaiter {
+ public:
+  // Moves the pending Wait message out of |message|.
+  explicit VSyncWaiter(pdx::Message& message) : message_(std::move(message)) {}
+
+  // Records |timestamp| and replies to the parked message with it.
+  void Notify(int64_t timestamp);
+
+ private:
+  // Reply payload handler invoked through DispatchRemoteMethod.
+  int64_t OnWait(pdx::Message& message);
+
+  pdx::Message message_;
+  int64_t timestamp_ = 0;
+
+  VSyncWaiter(const VSyncWaiter&) = delete;
+  void operator=(const VSyncWaiter&) = delete;
+};
+
+// VSyncChannel manages the service-side per-client context for each client
+// using the service.
+class VSyncChannel : public pdx::Channel {
+ public:
+  VSyncChannel(pdx::Service& service, int pid, int cid)
+      : service_(service), pid_(pid), cid_(cid) {}
+
+  // Ack() clears the pending POLLPRI vsync signal; Signal() raises it.
+  void Ack();
+  void Signal();
+
+ private:
+  pdx::Service& service_;
+  pid_t pid_;  // Client pid; used only in debug logging.
+  int cid_;    // Channel id passed to ModifyChannelEvents().
+
+  VSyncChannel(const VSyncChannel&) = delete;
+  void operator=(const VSyncChannel&) = delete;
+};
+
+// VSyncService implements the displayd vsync service over ServiceFS.
+class VSyncService : public pdx::ServiceBase<VSyncService> {
+ public:
+  ~VSyncService() override;
+
+  // Dispatches the vsync RPC ops (Wait, GetLastTimestamp, GetSchedInfo,
+  // Acknowledge); other ops fall through to the base service.
+  int HandleMessage(pdx::Message& message) override;
+
+  // Per-client channel lifecycle: each open channel gets a VSyncChannel.
+  std::shared_ptr<pdx::Channel> OnChannelOpen(pdx::Message& message) override;
+  void OnChannelClose(pdx::Message& message,
+                      const std::shared_ptr<pdx::Channel>& channel) override;
+
+  // Called by the hardware composer HAL, or similar,
+  // whenever a vsync event occurs.
+  // |compositor_time_ns| is the number of ns before the next vsync when the
+  // compositor will preempt the GPU to do EDS and lens warp.
+  void VSyncEvent(int display, int64_t timestamp_ns, int64_t compositor_time_ns,
+                  uint32_t vsync_count);
+
+ private:
+  friend BASE;
+
+  VSyncService();
+
+  int64_t OnGetLastTimestamp(pdx::Message& message);
+  VSyncSchedInfo OnGetSchedInfo(pdx::Message& message);
+  int OnAcknowledge(pdx::Message& message);
+
+  // Parks a Wait message until the next vsync.
+  void AddWaiter(pdx::Message& message);
+  // The next two assume |mutex_| is held by the caller.
+  void NotifyWaiters();
+  void UpdateClients();
+
+  void AddClient(const std::shared_ptr<VSyncChannel>& client);
+  void RemoveClient(const std::shared_ptr<VSyncChannel>& client);
+
+  // Vsync bookkeeping, guarded by |mutex_|.
+  int64_t last_vsync_;
+  int64_t current_vsync_;
+  int64_t compositor_time_ns_;
+  uint32_t current_vsync_count_;
+
+  std::mutex mutex_;
+
+  // One-shot waiters replied to on the next vsync, and persistent client
+  // channels signaled on every vsync.
+  std::list<std::unique_ptr<VSyncWaiter>> waiters_;
+  std::list<std::shared_ptr<VSyncChannel>> clients_;
+
+  VSyncService(const VSyncService&) = delete;
+  void operator=(const VSyncService&) = delete;
+};
+
+} // namespace dvr
+} // namespace android
+
+#endif // ANDROID_DVR_SERVICES_DISPLAYD_VSYNC_SERVICE_H_