SF: region sampling scheduling optimization
Schedule the region sampling thread more wisely by estimating when
the next invalidate is going to happen. This makes region sampling
more likely to be scheduled at times when the main thread is idle,
without eating into the budget of an invalidate message.
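
For illustration, a consumer such as the region sampling thread could
gate its work on this estimate. A minimal sketch, not part of this
change; maybeSample, sampleNow, and kMinIdleWindow are hypothetical:

    #include <chrono>
    #include "MessageQueue.h"

    void sampleNow(); // hypothetical sampling entry point

    // Sample only when no invalidate is imminent, so the sampling
    // work lands while the main thread is idle.
    void maybeSample(android::impl::MessageQueue& queue) {
        using namespace std::chrono;
        constexpr auto kMinIdleWindow = 4ms;
        const auto next = queue.nextExpectedInvalidate();
        if (!next || *next - steady_clock::now() > kMinIdleWindow) {
            sampleNow();
        }
    }
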
Bug: 181983990
Test: SF unit tests
Test: observe systrace
Change-Id: I1faca3aa7f882ed7c69e77e6a0877c10d57f0f1c
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.cpp b/services/surfaceflinger/Scheduler/MessageQueue.cpp
index 7ff0ddf..4d51125 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.cpp
+++ b/services/surfaceflinger/Scheduler/MessageQueue.cpp
@@ -23,7 +23,6 @@
#include <utils/threads.h>
#include <gui/DisplayEventReceiver.h>
-#include <gui/IDisplayEventConnection.h>
#include "EventThread.h"
#include "FrameTimeline.h"
@@ -33,27 +32,33 @@
namespace android::impl {
void MessageQueue::Handler::dispatchRefresh() {
- if ((android_atomic_or(eventMaskRefresh, &mEventMask) & eventMaskRefresh) == 0) {
+ if ((mEventMask.fetch_or(eventMaskRefresh) & eventMaskRefresh) == 0) {
mQueue.mLooper->sendMessage(this, Message(MessageQueue::REFRESH));
}
}
void MessageQueue::Handler::dispatchInvalidate(int64_t vsyncId, nsecs_t expectedVSyncTimestamp) {
- if ((android_atomic_or(eventMaskInvalidate, &mEventMask) & eventMaskInvalidate) == 0) {
+ if ((mEventMask.fetch_or(eventMaskInvalidate) & eventMaskInvalidate) == 0) {
mVsyncId = vsyncId;
mExpectedVSyncTime = expectedVSyncTimestamp;
mQueue.mLooper->sendMessage(this, Message(MessageQueue::INVALIDATE));
}
}
+// True while an INVALIDATE or REFRESH message is queued but not yet handled.
+bool MessageQueue::Handler::invalidatePending() {
+ constexpr auto pendingMask = eventMaskInvalidate | eventMaskRefresh;
+ return (mEventMask.load() & pendingMask) != 0;
+}
+
void MessageQueue::Handler::handleMessage(const Message& message) {
switch (message.what) {
case INVALIDATE:
- android_atomic_and(~eventMaskInvalidate, &mEventMask);
+ mEventMask.fetch_and(~eventMaskInvalidate);
mQueue.mFlinger->onMessageReceived(message.what, mVsyncId, mExpectedVSyncTime);
break;
case REFRESH:
- android_atomic_and(~eventMaskRefresh, &mEventMask);
+ mEventMask.fetch_and(~eventMaskRefresh);
mQueue.mFlinger->onMessageReceived(message.what, mVsyncId, mExpectedVSyncTime);
break;
}
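
As an aside, the lock-free coalescing idiom that replaces android_atomic_or
above reduces to a small standalone sketch (illustrative names, assuming a
plain std::atomic mask):

    #include <atomic>
    #include <cstdint>

    constexpr int32_t kInvalidate = 1 << 0;
    constexpr int32_t kRefresh = 1 << 1;
    std::atomic<int32_t> gEventMask{0};

    // fetch_or returns the previous mask, so only the caller that flips
    // the bit from 0 to 1 posts a message; later calls coalesce.
    bool shouldPost(int32_t event) {
        return (gEventMask.fetch_or(event) & event) == 0;
    }

    // Handling clears the bit, re-arming the next dispatch.
    void onHandled(int32_t event) {
        gEventMask.fetch_and(~event);
    }
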
@@ -106,7 +110,7 @@
{
std::lock_guard lock(mVsync.mutex);
mVsync.lastCallbackTime = std::chrono::nanoseconds(vsyncTime);
- mVsync.mScheduled = false;
+ mVsync.scheduled = false;
}
mHandler->dispatchInvalidate(mVsync.tokenManager->generateTokenForPredictions(
{targetWakeupTime, readyTime, vsyncTime}),
@@ -131,9 +135,10 @@
ATRACE_CALL();
std::lock_guard lock(mVsync.mutex);
mVsync.workDuration = workDuration;
- if (mVsync.mScheduled) {
- mVsync.registration->schedule({mVsync.workDuration.get().count(), /*readyDuration=*/0,
- mVsync.lastCallbackTime.count()});
+ if (mVsync.scheduled) {
+ mVsync.expectedWakeupTime = mVsync.registration->schedule(
+ {mVsync.workDuration.get().count(),
+ /*readyDuration=*/0, mVsync.lastCallbackTime.count()});
}
}
@@ -176,10 +181,11 @@
}
std::lock_guard lock(mVsync.mutex);
- mVsync.mScheduled = true;
- mVsync.registration->schedule({.workDuration = mVsync.workDuration.get().count(),
- .readyDuration = 0,
- .earliestVsync = mVsync.lastCallbackTime.count()});
+ mVsync.scheduled = true;
+ mVsync.expectedWakeupTime =
+ mVsync.registration->schedule({.workDuration = mVsync.workDuration.get().count(),
+ .readyDuration = 0,
+ .earliestVsync = mVsync.lastCallbackTime.count()});
}
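
The two hunks above apply the same pattern: every call that arms the vsync
callback now records the wakeup time that schedule() returns, under the
same lock that guards the scheduled flag. A minimal sketch of that state,
with hypothetical names:

    #include <cstdint>
    #include <mutex>
    #include <optional>

    struct VsyncState {
        std::mutex mutex;
        bool scheduled = false;
        // Wakeup time reported by the last schedule() call, in ns.
        std::optional<int64_t> expectedWakeupTime;
    };

    // Arm the callback and cache the reported wakeup time so that
    // nextExpectedInvalidate() can surface it later.
    void arm(VsyncState& vsync, int64_t reportedWakeupNs) {
        std::lock_guard lock(vsync.mutex);
        vsync.scheduled = true;
        vsync.expectedWakeupTime = reportedWakeupNs;
    }
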
void MessageQueue::refresh() {
@@ -200,4 +206,21 @@
}
}
+// Estimates when the next invalidate will run: now if a message is already
+// pending, the armed callback's expected wakeup if scheduled, else nullopt.
+std::optional<std::chrono::steady_clock::time_point> MessageQueue::nextExpectedInvalidate() {
+ if (mHandler->invalidatePending()) {
+ return std::chrono::steady_clock::now();
+ }
+
+ std::lock_guard lock(mVsync.mutex);
+ if (mVsync.scheduled) {
+ LOG_ALWAYS_FATAL_IF(!mVsync.expectedWakeupTime.has_value(), "callback was never scheduled");
+ const auto expectedWakeupTime = std::chrono::nanoseconds(*mVsync.expectedWakeupTime);
+ return std::optional<std::chrono::steady_clock::time_point>(expectedWakeupTime);
+ }
+
+ return std::nullopt;
+}
+
} // namespace android::impl
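
Stripped of the surrounding class, the new accessor makes a three-way
choice. A standalone sketch, assuming the cached wakeup time is
steady-clock nanoseconds:

    #include <chrono>
    #include <cstdint>
    #include <optional>

    std::optional<std::chrono::steady_clock::time_point> nextInvalidate(
            bool invalidatePending, bool scheduled,
            std::optional<int64_t> expectedWakeupNs) {
        if (invalidatePending) {
            // A message is already queued: an invalidate is due now.
            return std::chrono::steady_clock::now();
        }
        if (scheduled && expectedWakeupNs) {
            // A vsync callback is armed: report its expected wakeup.
            return std::chrono::steady_clock::time_point(
                    std::chrono::nanoseconds(*expectedWakeupNs));
        }
        return std::nullopt; // Nothing pending and nothing scheduled.
    }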