SF: add VSyncDispatch timing objects
Add epoll and timerfd dispatch of events.
Fixes: 140303478
Bug: 140301853
Test: 3 new tests in VSyncDispatchRealtimeTest
Change-Id: Ibc9bbaacfe3774247fb3d059dab3769bb17f3194
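A minimal usage sketch of the new Timer, for reviewers. This is illustrative only and not part of the patch; VSyncDispatchTimerQueue is the real client, and the exampleUsage() helper below is hypothetical.

    #define LOG_TAG "TimerExample"
    #include <log/log.h>
    #include "Scheduler/Timer.h"

    void exampleUsage() {
        android::scheduler::Timer timer;  // spawns the "TimerDispatch" thread

        // Arm a one-shot wakeup ~1ms from now; the callback runs on the
        // dispatch thread once the timerfd fires.
        timer.alarmIn([] { ALOGD("timer fired"); }, 1'000'000 /* ns */);

        // A later alarmIn() replaces the pending callback; alarmCancel()
        // disarms the timerfd. The destructor stops the dispatch thread.
        timer.alarmCancel();
    }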
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index f529a44..3d94918 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -166,6 +166,7 @@
"Scheduler/Scheduler.cpp",
"Scheduler/SchedulerUtils.cpp",
"Scheduler/VSyncDispatchTimerQueue.cpp",
+ "Scheduler/Timer.cpp",
"Scheduler/VSyncModulator.cpp",
"StartPropertySetThread.cpp",
"SurfaceFlinger.cpp",
diff --git a/services/surfaceflinger/Scheduler/Timer.cpp b/services/surfaceflinger/Scheduler/Timer.cpp
new file mode 100644
index 0000000..2394ed2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "SchedulerTimer"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <fcntl.h>
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/timerfd.h>
+#include <sys/unistd.h>
+#include <utils/Trace.h>
+#include <chrono>
+#include <cinttypes>
+#include <cstdint>
+#include <functional>
+#include <limits>
+
+#include "Timer.h"
+
+namespace android::scheduler {
+
+static constexpr size_t kReadPipe = 0;
+static constexpr size_t kWritePipe = 1;
+
+template <class T, size_t N>
+constexpr size_t arrayLen(T (&)[N]) {
+ return N;
+}
+
+Timer::Timer()
+ : mTimerFd(timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK)),
+ mEpollFd(epoll_create1(EPOLL_CLOEXEC)) {
+ if (pipe2(mPipes.data(), O_CLOEXEC | O_NONBLOCK)) {
+ ALOGE("could not create TimerDispatch mPipes");
+ }
+
+ mDispatchThread = std::thread(std::bind(&Timer::dispatch, this));
+}
+
+Timer::~Timer() {
+ endDispatch();
+ mDispatchThread.join();
+
+ close(mPipes[kWritePipe]);
+ close(mPipes[kReadPipe]);
+ close(mEpollFd);
+ close(mTimerFd);
+}
+
+void Timer::endDispatch() {
+ static constexpr unsigned char end = 'e';
+ write(mPipes[kWritePipe], &end, sizeof(end));
+}
+
+nsecs_t Timer::now() const {
+ return systemTime(SYSTEM_TIME_MONOTONIC);
+}
+
+constexpr char const* timerTraceTag = "AlarmInNs";
+void Timer::alarmIn(std::function<void()> const& cb, nsecs_t fireIn) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, fireIn);
+
+ using namespace std::literals;
+ static constexpr int ns_per_s =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(1s).count();
+
+ mCallback = cb;
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {.tv_sec = static_cast<long>(fireIn / ns_per_s),
+ .tv_nsec = static_cast<long>(fireIn % ns_per_s)},
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to set timerfd");
+ }
+}
+
+void Timer::alarmCancel() {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, 0);
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ },
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to disarm timerfd");
+ }
+}
+
+void Timer::dispatch() {
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
+ ALOGW("Failed to set SCHED_FIFO on dispatch thread");
+ }
+
+ if (pthread_setname_np(pthread_self(), "TimerDispatch")) {
+ ALOGW("Failed to set thread name on dispatch thread");
+ }
+
+ enum DispatchType : uint32_t { TIMER, TERMINATE, MAX_DISPATCH_TYPE };
+ epoll_event timerEvent;
+ timerEvent.events = EPOLLIN;
+ timerEvent.data.u32 = DispatchType::TIMER;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mTimerFd, &timerEvent) == -1) {
+ ALOGE("Error adding timer fd to epoll dispatch loop");
+ return;
+ }
+
+ epoll_event terminateEvent;
+ terminateEvent.events = EPOLLIN;
+ terminateEvent.data.u32 = DispatchType::TERMINATE;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mPipes[kReadPipe], &terminateEvent) == -1) {
+ ALOGE("Error adding control fd to dispatch loop");
+ return;
+ }
+
+ uint64_t iteration = 0;
+ char const traceNamePrefix[] = "TimerIteration #";
+ static constexpr size_t max64print = std::numeric_limits<decltype(iteration)>::digits10;
+ static constexpr size_t maxlen = arrayLen(traceNamePrefix) + max64print;
+ std::array<char, maxlen> str_buffer;
+ auto timing = true;
+ while (timing) {
+ epoll_event events[DispatchType::MAX_DISPATCH_TYPE];
+ int nfds = epoll_wait(mEpollFd, events, DispatchType::MAX_DISPATCH_TYPE, -1);
+
+ if (ATRACE_ENABLED()) {
+ snprintf(str_buffer.data(), str_buffer.size(), "%s%" PRIu64, traceNamePrefix,
+ iteration++);
+ ATRACE_NAME(str_buffer.data());
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ timing = false;
+ continue;
+ }
+ }
+
+ for (auto i = 0; i < nfds; i++) {
+ if (events[i].data.u32 == DispatchType::TIMER) {
+ static uint64_t mIgnored = 0;
+ read(mTimerFd, &mIgnored, sizeof(mIgnored));
+ std::function<void()> cb;
+ {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ cb = mCallback;
+ }
+ if (cb) {
+ cb();
+ }
+ }
+ if (events[i].data.u32 == DispatchType::TERMINATE) {
+ timing = false;
+ }
+ }
+ }
+}
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/Timer.h b/services/surfaceflinger/Scheduler/Timer.h
new file mode 100644
index 0000000..0ae82c8
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "TimeKeeper.h"
+
+#include <android-base/thread_annotations.h>
+#include <array>
+#include <functional>
+#include <mutex>
+#include <thread>
+
+namespace android::scheduler {
+
+class Timer : public TimeKeeper {
+public:
+ Timer();
+ ~Timer();
+ nsecs_t now() const final;
+
+ // NB: alarmIn and alarmCancel are thread-safe; the last call to return takes effect.
+ // Most users will want to serialize these calls so as to be aware of the timer state.
+ void alarmIn(std::function<void()> const& cb, nsecs_t fireIn) final;
+ void alarmCancel() final;
+
+private:
+ int const mTimerFd;
+ int const mEpollFd;
+ std::array<int, 2> mPipes;
+
+ std::thread mDispatchThread;
+ void dispatch();
+ void endDispatch();
+
+ std::mutex mMutex;
+ std::function<void()> mCallback GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index 78114a1..c6c3d29 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -55,6 +55,7 @@
"TransactionApplicationTest.cpp",
"StrongTypingTest.cpp",
"VSyncDispatchTimerQueueTest.cpp",
+ "VSyncDispatchRealtimeTest.cpp",
"mock/DisplayHardware/MockComposer.cpp",
"mock/DisplayHardware/MockDisplay.cpp",
"mock/DisplayHardware/MockPowerAdvisor.cpp",
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
new file mode 100644
index 0000000..c012616
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scheduler/TimeKeeper.h"
+#include "Scheduler/Timer.h"
+#include "Scheduler/VSyncDispatchTimerQueue.h"
+#include "Scheduler/VSyncTracker.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <array>
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+template <typename Rep, typename Per>
+constexpr nsecs_t toNs(std::chrono::duration<Rep, Per> const& tp) {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(tp).count();
+}
+
+class FixedRateIdealStubTracker : public VSyncTracker {
+public:
+ FixedRateIdealStubTracker() : mPeriod{toNs(3ms)} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final {
+ auto const floor = timePoint % mPeriod;
+ if (floor == 0) {
+ return timePoint;
+ }
+ return timePoint - floor + mPeriod;
+ }
+
+private:
+ nsecs_t const mPeriod;
+};
+
+class VRRStubTracker : public VSyncTracker {
+public:
+ VRRStubTracker(nsecs_t period) : mPeriod{period} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t time_point) const final {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ auto const normalized_to_base = time_point - mBase;
+ auto const floor = (normalized_to_base) % mPeriod;
+ if (floor == 0) {
+ return time_point;
+ }
+ return normalized_to_base - floor + mPeriod + mBase;
+ }
+
+ void set_interval(nsecs_t interval, nsecs_t last_known) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mPeriod = interval;
+ mBase = last_known;
+ }
+
+private:
+ std::mutex mutable mMutex;
+ nsecs_t mPeriod;
+ nsecs_t mBase = 0;
+};
+
+struct VSyncDispatchRealtimeTest : testing::Test {
+ static nsecs_t constexpr mDispatchGroupThreshold = toNs(100us);
+ static size_t constexpr mIterations = 20;
+};
+
+class RepeatingCallbackReceiver {
+public:
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t wl)
+ : mWorkload(wl),
+ mCallback(
+ dispatch, [&](auto time) { callback_called(time); }, "repeat0") {}
+
+ void repeatedly_schedule(size_t iterations, std::function<void(nsecs_t)> const& onEachFrame) {
+ mCallbackTimes.reserve(iterations);
+ mCallback.schedule(mWorkload, systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload);
+
+ for (auto i = 0u; i < iterations - 1; i++) {
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ mCalled = false;
+ auto last = mLastTarget;
+ lk.unlock();
+
+ onEachFrame(last);
+
+ mCallback.schedule(mWorkload, last + mWorkload);
+ }
+
+ // wait for the last callback.
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ }
+
+ void with_callback_times(std::function<void(std::vector<nsecs_t> const&)> const& fn) const {
+ fn(mCallbackTimes);
+ }
+
+private:
+ void callback_called(nsecs_t time) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mCallbackTimes.push_back(time);
+ mCalled = true;
+ mLastTarget = time;
+ mCv.notify_all();
+ }
+
+ nsecs_t const mWorkload;
+ VSyncCallbackRegistration mCallback;
+
+ std::mutex mMutex;
+ std::condition_variable mCv;
+ bool mCalled = false;
+ nsecs_t mLastTarget = 0;
+ std::vector<nsecs_t> mCallbackTimes;
+};
+
+TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
+ FixedRateIdealStubTracker tracker;
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ static size_t constexpr num_clients = 3;
+ std::array<RepeatingCallbackReceiver, num_clients>
+ cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us)),
+ RepeatingCallbackReceiver(dispatch, toNs(0h)),
+ RepeatingCallbackReceiver(dispatch, toNs(1ms))};
+
+ auto const on_each_frame = [](nsecs_t) {};
+ std::array<std::thread, num_clients> threads{
+ std::thread([&] { cb_receiver[0].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[1].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[2].repeatedly_schedule(mIterations, on_each_frame); }),
+ };
+
+ for (auto it = threads.rbegin(); it != threads.rend(); it++) {
+ it->join();
+ }
+
+ for (auto const& cbs : cb_receiver) {
+ cbs.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+ }
+}
+
+// starts at 333Hz, slides down to 43Hz
+TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
+ auto next_vsync_interval = toNs(3ms);
+ VRRStubTracker tracker(next_vsync_interval);
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
+ };
+
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+
+// starts at 333Hz, jumps to 200Hz at frame 10
+TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
+ VRRStubTracker tracker(toNs(3ms));
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto jump_frame_counter = 0u;
+ auto constexpr jump_frame_at = 10u;
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ if (jump_frame_counter++ == jump_frame_at) {
+ tracker.set_interval(toNs(5ms), last_known);
+ }
+ };
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+} // namespace android::scheduler