SurfaceFlinger: get expected present time directly from VSyncReactor
When SurfaceFlinger calculates the expected vsync time, it asks
VSyncReactor for the next anticipated vsync after the wakeup time.
To handle negative offsets (when SF wakes up more than a vsync
period before the vsync event), SF adds a vsync period to that
anticipated vsync.
This creates a race condition: the offset in effect at the time of
invalidate may differ from the offset VSyncReactor used when
requestNextVsync() was called. To fix this, we plumb the vsync time
that VSyncReactor originally calculated through to SurfaceFlinger.
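
As an illustration, a minimal sketch of the arithmetic (simplified
from the patch below; the standalone harness and flattened parameter
names are hypothetical, not SurfaceFlinger code):

    #include <cstdint>
    #include <cstdio>

    using nsecs_t = int64_t;

    // Mirrors computeNextRefreshLocked() from the patch: the first
    // vsync strictly after `now`, shifted by `periodOffset` whole
    // periods.
    nsecs_t computeNextRefresh(nsecs_t referenceTime, nsecs_t phase,
                               nsecs_t period, int periodOffset,
                               nsecs_t now) {
        const nsecs_t base = referenceTime + phase;
        if (period == 0) return 0;
        return (((now - base) / period) + periodOffset + 1) * period + base;
    }

    int main() {
        const nsecs_t period = 16666667;       // ~60 Hz vsync period
        const nsecs_t now = 3 * period + 1000; // just past a vsync
        nsecs_t expected = computeNextRefresh(0, 0, period, 0, now);
        // Listeners registered with a negative phase wake up before
        // the vsync they target, so the patch adds one period to the
        // expected vsync time for them.
        const nsecs_t listenerPhase = -period / 4; // hypothetical
        if (listenerPhase < 0) expected += period;
        printf("expected vsync at %lld ns\n", (long long)expected);
        return 0;
    }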
Bug: 154303002
Test: Run app transitions and collect systrace
Change-Id: I3f2670c7b0ecb52a85fb07df6d360694b51d5d66
diff --git a/services/surfaceflinger/Scheduler/DispSync.cpp b/services/surfaceflinger/Scheduler/DispSync.cpp
index fc6ccae..ff91bf7 100644
--- a/services/surfaceflinger/Scheduler/DispSync.cpp
+++ b/services/surfaceflinger/Scheduler/DispSync.cpp
@@ -200,7 +200,8 @@
}
}
- callbackInvocations = gatherCallbackInvocationsLocked(now);
+ callbackInvocations =
+ gatherCallbackInvocationsLocked(now, computeNextRefreshLocked(0, now));
}
if (callbackInvocations.size() > 0) {
@@ -303,6 +304,11 @@
return BAD_VALUE;
}
+ nsecs_t computeNextRefresh(int periodOffset, nsecs_t now) const {
+ Mutex::Autolock lock(mMutex);
+ return computeNextRefreshLocked(periodOffset, now);
+ }
+
private:
struct EventListener {
const char* mName;
@@ -315,6 +321,7 @@
struct CallbackInvocation {
DispSync::Callback* mCallback;
nsecs_t mEventTime;
+ nsecs_t mExpectedVSyncTime;
};
nsecs_t computeNextEventTimeLocked(nsecs_t now) {
@@ -340,7 +347,8 @@
return duration < (3 * mPeriod) / 5;
}
- std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
+ std::vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now,
+ nsecs_t expectedVSyncTime) {
if (mTraceDetailedInfo) ATRACE_CALL();
ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, ns2us(now));
@@ -361,6 +369,10 @@
CallbackInvocation ci;
ci.mCallback = eventListener.mCallback;
ci.mEventTime = t;
+ ci.mExpectedVSyncTime = expectedVSyncTime;
+ if (eventListener.mPhase < 0) {
+ ci.mExpectedVSyncTime += mPeriod;
+ }
ALOGV("[%s] [%s] Preparing to fire, latency: %" PRId64, mName, eventListener.mName,
t - eventListener.mLastEventTime);
callbackInvocations.push_back(ci);
@@ -426,10 +438,19 @@
void fireCallbackInvocations(const std::vector<CallbackInvocation>& callbacks) {
if (mTraceDetailedInfo) ATRACE_CALL();
for (size_t i = 0; i < callbacks.size(); i++) {
- callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
+ callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime,
+ callbacks[i].mExpectedVSyncTime);
}
}
+ nsecs_t computeNextRefreshLocked(int periodOffset, nsecs_t now) const {
+ nsecs_t phase = mReferenceTime + mPhase;
+ if (mPeriod == 0) {
+ return 0;
+ }
+ return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
+ }
+
const char* const mName;
bool mStop;
@@ -444,7 +465,7 @@
std::vector<EventListener> mEventListeners;
- Mutex mMutex;
+ mutable Mutex mMutex;
Condition mCond;
// Flag to turn on logging in systrace.
@@ -458,7 +479,7 @@
public:
ZeroPhaseTracer() : mParity("ZERO_PHASE_VSYNC", false) {}
- virtual void onDispSyncEvent(nsecs_t /*when*/) {
+ virtual void onDispSyncEvent(nsecs_t /*when*/, nsecs_t /*expectedVSyncTimestamp*/) {
mParity = !mParity;
}
@@ -845,7 +866,7 @@
const uint32_t hwcLatency = 0;
// Ask DispSync when the next refresh will be (CLOCK_MONOTONIC).
- return computeNextRefresh(hwcLatency, now);
+ return mThread->computeNextRefresh(hwcLatency, now);
}
} // namespace impl
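
For completeness, a sketch of what a callback implementation sees
with the widened interface (only the two-argument onDispSyncEvent()
signature comes from this patch; the Callback stand-in and the
ExampleListener class are illustrative):

    #include <cstdint>

    using nsecs_t = int64_t;

    struct Callback { // stand-in for DispSync::Callback
        virtual ~Callback() = default;
        virtual void onDispSyncEvent(nsecs_t when,
                                     nsecs_t expectedVSyncTimestamp) = 0;
    };

    class ExampleListener : public Callback {
    public:
        void onDispSyncEvent(nsecs_t when,
                             nsecs_t expectedVSyncTimestamp) override {
            // The expected vsync is the value computed when the
            // callback was gathered, so the consumer no longer
            // re-derives it at wakeup with a possibly different
            // offset -- the race described in the commit message.
            mLastWakeup = when;
            mLastExpectedVSync = expectedVSyncTimestamp;
        }

    private:
        nsecs_t mLastWakeup = 0;
        nsecs_t mLastExpectedVSync = 0;
    };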