Improve the VSYNC API a bit.
- add the ability to set the vsync delivery rate. When the rate is
set to N > 1 (i.e. receive every Nth vsync), the SurfaceFlinger
process is still woken up for every vsync, but clients only see
every Nth event (see the first sketch below).
- add the concept of one-shot vsync events, with a callback to
request the next one (see the second sketch below). Currently the
callback is a binder IPC.
Change-Id: I09f71df0b0ba0d88ed997645e2e2497d553c9a1b
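
For illustration only, not part of this change: a minimal client-side
sketch of the rate API. It assumes the companion DisplayEventReceiver
wrapper in libgui exposes initCheck(), setVsyncRate(), getFd() and
getEvents(); those names and signatures are assumptions here, not
something this patch guarantees.

    #include <poll.h>
    #include <gui/DisplayEventReceiver.h>

    using namespace android;

    int main() {
        DisplayEventReceiver receiver;
        if (receiver.initCheck() != NO_ERROR) return 1;

        // Receive every 2nd vsync: SurfaceFlinger still wakes up for
        // each one, but this client only sees half of them.
        receiver.setVsyncRate(2);

        struct pollfd pfd = { receiver.getFd(), POLLIN, 0 };
        while (poll(&pfd, 1, -1) > 0) {
            DisplayEventReceiver::Event events[8];
            ssize_t n;
            while ((n = receiver.getEvents(events, 8)) > 0) {
                for (ssize_t i = 0; i < n; i++) {
                    if (events[i].header.type ==
                            DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
                        // events[i].header.timestamp is the vsync time;
                        // events[i].vsync.count is the delivery counter.
                    }
                }
            }
        }
        return 0;
    }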
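
And a sketch of the one-shot mode along the same lines; runOneShot()
is a hypothetical helper. Per the server logic below, a rate of 0 maps
to count = -1, i.e. no continuous delivery, so each event must be
armed explicitly with requestNextVsync() (a binder call).

    #include <poll.h>
    #include <gui/DisplayEventReceiver.h>

    // Hypothetical helper: render a fixed number of frames, arming each
    // vsync individually instead of receiving a continuous stream.
    void runOneShot(android::DisplayEventReceiver& receiver, int frames) {
        receiver.setVsyncRate(0);  // rate 0: stop continuous delivery
        struct pollfd pfd = { receiver.getFd(), POLLIN, 0 };
        for (int i = 0; i < frames; i++) {
            receiver.requestNextVsync();        // binder call arming one event
            if (poll(&pfd, 1, -1) <= 0) break;  // wait for that event
            android::DisplayEventReceiver::Event ev;
            while (receiver.getEvents(&ev, 1) > 0) {
                // render using ev.header.timestamp
            }
        }
    }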
diff --git a/services/surfaceflinger/EventThread.cpp b/services/surfaceflinger/EventThread.cpp
index 42477a9..dc39f88 100644
--- a/services/surfaceflinger/EventThread.cpp
+++ b/services/surfaceflinger/EventThread.cpp
@@ -47,7 +47,8 @@
status_t EventThread::registerDisplayEventConnection(
const sp<DisplayEventConnection>& connection) {
Mutex::Autolock _l(mLock);
- mDisplayEventConnections.add(connection);
+ ConnectionInfo info;
+ mDisplayEventConnections.add(connection, info);
mCondition.signal();
return NO_ERROR;
}
@@ -55,44 +56,97 @@
status_t EventThread::unregisterDisplayEventConnection(
const wp<DisplayEventConnection>& connection) {
Mutex::Autolock _l(mLock);
- mDisplayEventConnections.remove(connection);
+ mDisplayEventConnections.removeItem(connection);
mCondition.signal();
return NO_ERROR;
}
-status_t EventThread::removeDisplayEventConnection(
+void EventThread::removeDisplayEventConnection(
const wp<DisplayEventConnection>& connection) {
Mutex::Autolock _l(mLock);
- mDisplayEventConnections.remove(connection);
- return NO_ERROR;
+ mDisplayEventConnections.removeItem(connection);
+}
+
+EventThread::ConnectionInfo* EventThread::getConnectionInfoLocked(
+ const wp<DisplayEventConnection>& connection) {
+ ssize_t index = mDisplayEventConnections.indexOfKey(connection);
+ if (index < 0) return NULL;
+ return &mDisplayEventConnections.editValueAt(index);
+}
+
+void EventThread::setVsyncRate(uint32_t count,
+ const wp<DisplayEventConnection>& connection) {
+ if (int32_t(count) >= 0) { // server must protect against bad params
+ Mutex::Autolock _l(mLock);
+ ConnectionInfo* info = getConnectionInfoLocked(connection);
+ if (info) {
+ info->count = (count == 0) ? -1 : count;
+ mCondition.signal();
+ }
+ }
+}
+
+void EventThread::requestNextVsync(
+ const wp<DisplayEventConnection>& connection) {
+ Mutex::Autolock _l(mLock);
+ ConnectionInfo* info = getConnectionInfoLocked(connection);
+ if (info) {
+ if (info->count < 0) {
+ info->count = 0;
+ }
+ mCondition.signal();
+ }
}
bool EventThread::threadLoop() {
nsecs_t timestamp;
DisplayEventReceiver::Event vsync;
- SortedVector<wp<DisplayEventConnection> > displayEventConnections;
+ KeyedVector< wp<DisplayEventConnection>, ConnectionInfo > displayEventConnections;
{ // scope for the lock
Mutex::Autolock _l(mLock);
do {
// wait for listeners
- while (!mDisplayEventConnections.size()) {
+ do {
+ bool waitForNextVsync = false;
+ size_t count = mDisplayEventConnections.size();
+ for (size_t i=0 ; i<count ; i++) {
+ const ConnectionInfo& info(
+ mDisplayEventConnections.valueAt(i));
+ if (info.count >= 1) {
+ // continuous mode
+ waitForNextVsync = true;
+ } else {
+ // one-shot event
+ if (info.count >= -1) {
+ ConnectionInfo& writableInfo(
+ mDisplayEventConnections.editValueAt(i));
+ writableInfo.count--;
+ if (writableInfo.count == -1) {
+ // fired this time around
+ waitForNextVsync = true;
+ }
+ }
+ }
+ }
+
+ if (waitForNextVsync)
+ break;
+
mCondition.wait(mLock);
- }
+ } while (true);
// wait for vsync
mLock.unlock();
timestamp = mHw.waitForVSync();
mLock.lock();
+ mDeliveredEvents++;
// make sure we still have some listeners
} while (!mDisplayEventConnections.size());
-
// dispatch vsync events to listeners...
- mDeliveredEvents++;
-
vsync.header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
vsync.header.timestamp = timestamp;
vsync.vsync.count = mDeliveredEvents;
@@ -104,9 +158,30 @@
const size_t count = displayEventConnections.size();
for (size_t i=0 ; i<count ; i++) {
- sp<DisplayEventConnection> conn(displayEventConnections.itemAt(i).promote());
+ sp<DisplayEventConnection> conn(displayEventConnections.keyAt(i).promote());
// make sure the connection didn't die
if (conn != NULL) {
+
+ const ConnectionInfo& info(
+ displayEventConnections.valueAt(i));
+
+ if ((info.count > 1) && (mDeliveredEvents % info.count)) {
+ // continuous event, but not time to send this event yet
+ continue;
+ } else if (info.count < -1) {
+ // disabled event
+ continue;
+ } else if (info.count == 0) {
+ // impossible by construction, but we prefer to be safe.
+ continue;
+ }
+
+ // here, either:
+ // count = -1 : one-shot scheduled this time around
+ // count = 1 : continuous not rate-limited
+ // count > 1 : continuous, rate-limited
+ // Note: count == 0 is not possible by construction
+
status_t err = conn->postEvent(vsync);
if (err == -EAGAIN || err == -EWOULDBLOCK) {
// The destination doesn't accept events anymore, it's probably
@@ -118,12 +193,12 @@
// handle any other error on the pipe as fatal. the only
// reasonable thing to do is to clean-up this connection.
// The most common error we'll get here is -EPIPE.
- removeDisplayEventConnection(displayEventConnections.itemAt(i));
+ removeDisplayEventConnection(displayEventConnections.keyAt(i));
}
} else {
// somehow the connection is dead, but we still have it in our list
// just clean the list.
- removeDisplayEventConnection(displayEventConnections.itemAt(i));
+ removeDisplayEventConnection(displayEventConnections.keyAt(i));
}
}
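
For reference, and not part of the patch: the dispatch filter above can
be restated as a standalone predicate, which makes the count-state
semantics easier to check in isolation. shouldDeliverVsync() is a
hypothetical name.

    #include <stdint.h>

    // Mirrors the filter in EventThread::threadLoop(): given a
    // connection's count state and the global delivered-event counter,
    // decide whether this vsync is forwarded to the client.
    static bool shouldDeliverVsync(int32_t count, uint32_t deliveredEvents) {
        if (count < -1)
            return false;  // disabled (a one-shot that already fired)
        if (count == 0)
            return false;  // impossible by construction, but be safe
        if (count > 1 && (deliveredEvents % count) != 0)
            return false;  // continuous but rate-limited: not this cycle
        // count == -1: one-shot due now; count == 1: every vsync;
        // count > 1 on-cycle: rate-limited delivery.
        return true;
    }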