When building the DispSync model, only fire callbacks on observed vsyncs + offset.

We should attempt to predict when vsyncs will occur only when HW vsyncs
are turned off. Otherwise, if we are in 90Hz mode but observe 16.6ms
vsyncs (60Hz) because the display driver detected inactivity and dropped
down to 60Hz, we might dispatch callbacks early. This change prevents
those early callbacks from being dispatched.
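
As a rough illustration of the gating this introduces (a simplified
sketch with hypothetical names like Listener and shouldDispatch, not the
actual DispSyncThread code): while the model is unlocked, each listener
fires at most once per observed model; once the model is locked,
callbacks free-run off the predictions.

    // Sketch only: Listener/Model/shouldDispatch are illustrative
    // stand-ins for DispSyncThread's EventListener, mModelLocked and
    // its dispatch loop.
    #include <cstdint>
    #include <vector>

    struct Listener {
        int64_t lastEventTime = 0;
        bool hasFired = false;  // set once a callback is dispatched for the current model
    };

    struct Model {
        bool locked = false;        // mirrors mModelLocked: HW vsyncs off, model trusted
        int64_t referenceTime = 0;
        std::vector<Listener> listeners;

        // Rebuilding the model from fresh HW vsync samples re-arms every listener.
        void update(int64_t newReferenceTime) {
            if (referenceTime != newReferenceTime) {
                for (auto& l : listeners) l.hasFired = false;
            }
            referenceTime = newReferenceTime;
        }

        // Returns whether a listener's callback may fire at predicted time t.
        bool shouldDispatch(Listener& l, int64_t t) {
            if (l.hasFired && !locked) {
                // Still resyncing: the prediction may be stale (e.g. the
                // panel quietly dropped from 90Hz to 60Hz), so fire at most
                // once per observed model instead of free-running.
                l.lastEventTime = t;
                return false;
            }
            l.hasFired = true;
            l.lastEventTime = t;
            return true;
        }
    };

Here update() corresponds to the reference-time check added in the
second hunk below, and locked is toggled by the lockModel()/unlockModel()
calls added to addResyncSample/endResync and beginResync.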

Bug: 130684082
Test: systrace
Change-Id: I02bfd22beb00ddbe344c29722581217ad00a96af
diff --git a/services/surfaceflinger/Scheduler/DispSync.cpp b/services/surfaceflinger/Scheduler/DispSync.cpp
index abf7b71..cd6fa41 100644
--- a/services/surfaceflinger/Scheduler/DispSync.cpp
+++ b/services/surfaceflinger/Scheduler/DispSync.cpp
@@ -64,6 +64,7 @@
     DispSyncThread(const char* name, bool showTraceDetailedInfo)
           : mName(name),
             mStop(false),
+            mModelLocked(false),
             mPeriod(0),
             mPhase(0),
             mReferenceTime(0),
@@ -78,6 +79,11 @@
         Mutex::Autolock lock(mMutex);
 
         mPhase = phase;
+        if (mReferenceTime != referenceTime) {
+            for (auto& eventListener : mEventListeners) {
+                eventListener.mHasFired = false;
+            }
+        }
         mReferenceTime = referenceTime;
         if (mPeriod != 0 && mPeriod != period && mReferenceTime != 0) {
             // Inflate the reference time to be the most recent predicted
@@ -106,6 +112,16 @@
         mCond.signal();
     }
 
+    void lockModel() {
+        Mutex::Autolock lock(mMutex);
+        mModelLocked = true;
+    }
+
+    void unlockModel() {
+        Mutex::Autolock lock(mMutex);
+        mModelLocked = false;
+    }
+
     virtual bool threadLoop() {
         status_t err;
         nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
@@ -288,6 +304,7 @@
         nsecs_t mLastEventTime;
         nsecs_t mLastCallbackTime;
         DispSync::Callback* mCallback;
+        bool mHasFired = false;
     };
 
     struct CallbackInvocation {
@@ -335,6 +352,12 @@
                           eventListener.mName);
                     continue;
                 }
+                if (eventListener.mHasFired && !mModelLocked) {
+                    eventListener.mLastEventTime = t;
+                    ALOGV("[%s] [%s] Skipping event due to already firing", mName,
+                          eventListener.mName);
+                    continue;
+                }
                 CallbackInvocation ci;
                 ci.mCallback = eventListener.mCallback;
                 ci.mEventTime = t;
@@ -343,6 +366,7 @@
                 callbackInvocations.push_back(ci);
                 eventListener.mLastEventTime = t;
                 eventListener.mLastCallbackTime = now;
+                eventListener.mHasFired = true;
             }
         }
 
@@ -410,6 +434,7 @@
     const char* const mName;
 
     bool mStop;
+    bool mModelLocked;
 
     nsecs_t mPeriod;
     nsecs_t mPhase;
@@ -497,6 +522,7 @@
     mNumResyncSamples = 0;
     mFirstResyncSample = 0;
     mNumResyncSamplesSincePresent = 0;
+    mThread->unlockModel();
     resetErrorLocked();
 }
 
@@ -519,6 +545,7 @@
 void DispSync::beginResync() {
     Mutex::Autolock lock(mMutex);
     ALOGV("[%s] beginResync", mName);
+    mThread->unlockModel();
     mModelUpdated = false;
     mNumResyncSamples = 0;
 }
@@ -581,10 +608,15 @@
     // resync again
     bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2) && mPendingPeriod == 0;
     ALOGV("[%s] addResyncSample returning %s", mName, modelLocked ? "locked" : "unlocked");
+    if (modelLocked) {
+        mThread->lockModel();
+    }
     return !modelLocked;
 }
 
-void DispSync::endResync() {}
+void DispSync::endResync() {
+    mThread->lockModel();
+}
 
 status_t DispSync::addEventListener(const char* name, nsecs_t phase, Callback* callback,
                                     nsecs_t lastCallbackTime) {