SurfaceFlinger: use duration instead of offset

This change uses the durations given to sf/app directly, without
converting them back to the legacy offsets. This allows us to
associate a wakeup event with its expected vsync.
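
A minimal sketch of the resulting timing arithmetic (mirroring what
VSyncDispatchTimerQueueEntry computes in this change; the numbers are
illustrative):

    // wake up early enough to do the work and meet the ready deadline
    const nsecs_t wakeupTime = vsyncTime - workDuration - readyDuration;
    const nsecs_t readyTime  = vsyncTime - readyDuration;
    // e.g. vsync in 16.6ms, workDuration=6.6ms, readyDuration=0:
    // wake up 10.0ms from now, frame must be ready by the vsync.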

This change also required some refactoring:
 - Move the periodic callback interface out of DispSync and into
   DispSyncSource
 - Translate legacy offsets to durations when durations are not
   specified in the build files (see the sketch below)
 - Add support to VSD for exposing the deadline timestamp by which
   a frame needs to be ready
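
A rough sketch of the offset-to-duration translation mentioned above,
following the sign convention of the old CallbackRepeater (its
calculateWorkload returned period - offset); the helper name is
illustrative only:

    // Legacy offsets are measured after the previous vsync; durations
    // are measured before the target vsync, hence the sign flip.
    nsecs_t durationFromLegacyOffset(nsecs_t period, nsecs_t offset) {
        return period - offset;
    }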

Bug: 162888874
Test: SF unit tests
Test: examine systraces

Change-Id: I87a53cc7dea931d3c195eab6842e003ca4516885
diff --git a/services/surfaceflinger/Scheduler/DispSync.h b/services/surfaceflinger/Scheduler/DispSync.h
index 7b2c9c3..f34aabc 100644
--- a/services/surfaceflinger/Scheduler/DispSync.h
+++ b/services/surfaceflinger/Scheduler/DispSync.h
@@ -32,32 +32,15 @@
 
 class DispSync {
 public:
-    class Callback {
-    public:
-        Callback() = default;
-        virtual ~Callback() = default;
-        virtual void onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) = 0;
-
-    protected:
-        Callback(Callback const&) = delete;
-        Callback& operator=(Callback const&) = delete;
-    };
-
     DispSync() = default;
     virtual ~DispSync() = default;
 
-    virtual void reset() = 0;
     virtual bool addPresentFence(const std::shared_ptr<FenceTime>&) = 0;
     virtual void beginResync() = 0;
     virtual bool addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
                                  bool* periodFlushed) = 0;
-    virtual void endResync() = 0;
     virtual void setPeriod(nsecs_t period) = 0;
     virtual nsecs_t getPeriod() = 0;
-    virtual status_t addEventListener(const char* name, nsecs_t phase, Callback* callback,
-                                      nsecs_t lastCallbackTime) = 0;
-    virtual status_t removeEventListener(Callback* callback, nsecs_t* outLastCallback) = 0;
-    virtual status_t changePhaseOffset(Callback* callback, nsecs_t phase) = 0;
     virtual nsecs_t computeNextRefresh(int periodOffset, nsecs_t now) const = 0;
     virtual void setIgnorePresentFences(bool ignore) = 0;
     virtual nsecs_t expectedPresentTime(nsecs_t now) = 0;
diff --git a/services/surfaceflinger/Scheduler/DispSyncSource.cpp b/services/surfaceflinger/Scheduler/DispSyncSource.cpp
index 8752b66..75f072d 100644
--- a/services/surfaceflinger/Scheduler/DispSyncSource.cpp
+++ b/services/surfaceflinger/Scheduler/DispSyncSource.cpp
@@ -14,10 +14,6 @@
  * limitations under the License.
  */
 
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wconversion"
-
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 
 #include "DispSyncSource.h"
@@ -29,34 +25,129 @@
 #include "DispSync.h"
 #include "EventThread.h"
 
-namespace android {
+namespace android::scheduler {
 using base::StringAppendF;
+using namespace std::chrono_literals;
 
-DispSyncSource::DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync,
+// The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
+// VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
+// for now.
+class CallbackRepeater {
+public:
+    CallbackRepeater(VSyncDispatch& dispatch, VSyncDispatch::Callback cb, const char* name,
+                     std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration,
+                     std::chrono::nanoseconds notBefore)
+          : mName(name),
+            mCallback(cb),
+            mRegistration(dispatch,
+                          std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
+                                    std::placeholders::_2, std::placeholders::_3),
+                          mName),
+            mStarted(false),
+            mWorkDuration(workDuration),
+            mReadyDuration(readyDuration),
+            mLastCallTime(notBefore) {}
+
+    ~CallbackRepeater() {
+        std::lock_guard lock(mMutex);
+        mRegistration.cancel();
+    }
+
+    void start(std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration) {
+        std::lock_guard lock(mMutex);
+        mStarted = true;
+        mWorkDuration = workDuration;
+        mReadyDuration = readyDuration;
+
+        auto const scheduleResult =
+                mRegistration.schedule({.workDuration = mWorkDuration.count(),
+                                        .readyDuration = mReadyDuration.count(),
+                                        .earliestVsync = mLastCallTime.count()});
+        LOG_ALWAYS_FATAL_IF((scheduleResult != scheduler::ScheduleResult::Scheduled),
+                            "Error scheduling callback: rc %X", scheduleResult);
+    }
+
+    void stop() {
+        std::lock_guard lock(mMutex);
+        LOG_ALWAYS_FATAL_IF(!mStarted, "DispSyncInterface misuse: callback already stopped");
+        mStarted = false;
+        mRegistration.cancel();
+    }
+
+    void dump(std::string& result) const {
+        std::lock_guard lock(mMutex);
+        const auto relativeLastCallTime =
+                mLastCallTime - std::chrono::steady_clock::now().time_since_epoch();
+        StringAppendF(&result, "\t%s: ", mName.c_str());
+        StringAppendF(&result, "mWorkDuration=%.2f mReadyDuration=%.2f last vsync time ",
+                      mWorkDuration.count() / 1e6f, mReadyDuration.count() / 1e6f);
+        StringAppendF(&result, "%.2fms relative to now (%s)\n", relativeLastCallTime.count() / 1e6f,
+                      mStarted ? "running" : "stopped");
+    }
+
+private:
+    void callback(nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
+        {
+            std::lock_guard lock(mMutex);
+            mLastCallTime = std::chrono::nanoseconds(vsyncTime);
+        }
+
+        mCallback(vsyncTime, wakeupTime, readyTime);
+
+        {
+            std::lock_guard lock(mMutex);
+            if (!mStarted) {
+                return;
+            }
+            auto const scheduleResult =
+                    mRegistration.schedule({.workDuration = mWorkDuration.count(),
+                                            .readyDuration = mReadyDuration.count(),
+                                            .earliestVsync = vsyncTime});
+            LOG_ALWAYS_FATAL_IF((scheduleResult != ScheduleResult::Scheduled),
+                                "Error rescheduling callback: rc %X", scheduleResult);
+        }
+    }
+
+    const std::string mName;
+    scheduler::VSyncDispatch::Callback mCallback;
+
+    mutable std::mutex mMutex;
+    VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
+    bool mStarted GUARDED_BY(mMutex) = false;
+    std::chrono::nanoseconds mWorkDuration GUARDED_BY(mMutex) = 0ns;
+    std::chrono::nanoseconds mReadyDuration GUARDED_BY(mMutex) = 0ns;
+    std::chrono::nanoseconds mLastCallTime GUARDED_BY(mMutex) = 0ns;
+};
+
+DispSyncSource::DispSyncSource(scheduler::VSyncDispatch& vSyncDispatch,
+                               std::chrono::nanoseconds workDuration,
+                               std::chrono::nanoseconds readyDuration, bool traceVsync,
                                const char* name)
       : mName(name),
         mValue(base::StringPrintf("VSYNC-%s", name), 0),
         mTraceVsync(traceVsync),
         mVsyncOnLabel(base::StringPrintf("VsyncOn-%s", name)),
-        mDispSync(dispSync),
-        mPhaseOffset(base::StringPrintf("VsyncOffset-%s", name), phaseOffset) {}
+        mWorkDuration(base::StringPrintf("VsyncWorkDuration-%s", name), workDuration),
+        mReadyDuration(readyDuration) {
+    mCallbackRepeater =
+            std::make_unique<CallbackRepeater>(vSyncDispatch,
+                                               std::bind(&DispSyncSource::onVsyncCallback, this,
+                                                         std::placeholders::_1,
+                                                         std::placeholders::_2,
+                                                         std::placeholders::_3),
+                                               name, workDuration, readyDuration,
+                                               std::chrono::steady_clock::now().time_since_epoch());
+}
+
+DispSyncSource::~DispSyncSource() = default;
 
 void DispSyncSource::setVSyncEnabled(bool enable) {
     std::lock_guard lock(mVsyncMutex);
     if (enable) {
-        status_t err = mDispSync->addEventListener(mName, mPhaseOffset,
-                                                   static_cast<DispSync::Callback*>(this),
-                                                   mLastCallbackTime);
-        if (err != NO_ERROR) {
-            ALOGE("error registering vsync callback: %s (%d)", strerror(-err), err);
-        }
+        mCallbackRepeater->start(mWorkDuration, mReadyDuration);
         // ATRACE_INT(mVsyncOnLabel.c_str(), 1);
     } else {
-        status_t err = mDispSync->removeEventListener(static_cast<DispSync::Callback*>(this),
-                                                      &mLastCallbackTime);
-        if (err != NO_ERROR) {
-            ALOGE("error unregistering vsync callback: %s (%d)", strerror(-err), err);
-        }
+        mCallbackRepeater->stop();
         // ATRACE_INT(mVsyncOnLabel.c_str(), 0);
     }
     mEnabled = enable;
@@ -67,32 +158,22 @@
     mCallback = callback;
 }
 
-void DispSyncSource::setPhaseOffset(nsecs_t phaseOffset) {
+void DispSyncSource::setDuration(std::chrono::nanoseconds workDuration,
+                                 std::chrono::nanoseconds readyDuration) {
     std::lock_guard lock(mVsyncMutex);
-    const nsecs_t period = mDispSync->getPeriod();
-
-    // Normalize phaseOffset to [-period, period)
-    const int numPeriods = phaseOffset / period;
-    phaseOffset -= numPeriods * period;
-    if (mPhaseOffset == phaseOffset) {
-        return;
-    }
-
-    mPhaseOffset = phaseOffset;
+    mWorkDuration = workDuration;
+    mReadyDuration = readyDuration;
 
     // If we're not enabled, we don't need to mess with the listeners
     if (!mEnabled) {
         return;
     }
 
-    status_t err =
-            mDispSync->changePhaseOffset(static_cast<DispSync::Callback*>(this), mPhaseOffset);
-    if (err != NO_ERROR) {
-        ALOGE("error changing vsync offset: %s (%d)", strerror(-err), err);
-    }
+    mCallbackRepeater->start(mWorkDuration, mReadyDuration);
 }
 
-void DispSyncSource::onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) {
+void DispSyncSource::onVsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime,
+                                     nsecs_t readyTime) {
     VSyncSource::Callback* callback;
     {
         std::lock_guard lock(mCallbackMutex);
@@ -104,17 +185,13 @@
     }
 
     if (callback != nullptr) {
-        callback->onVSyncEvent(when, expectedVSyncTimestamp);
+        callback->onVSyncEvent(targetWakeupTime, vsyncTime, readyTime);
     }
 }
 
 void DispSyncSource::dump(std::string& result) const {
     std::lock_guard lock(mVsyncMutex);
     StringAppendF(&result, "DispSyncSource: %s(%s)\n", mName, mEnabled ? "enabled" : "disabled");
-    mDispSync->dump(result);
 }
 
-} // namespace android
-
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic pop // ignored "-Wconversion"
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/DispSyncSource.h b/services/surfaceflinger/Scheduler/DispSyncSource.h
index 2aee3f6..2fce235 100644
--- a/services/surfaceflinger/Scheduler/DispSyncSource.h
+++ b/services/surfaceflinger/Scheduler/DispSyncSource.h
@@ -18,45 +18,47 @@
 #include <mutex>
 #include <string>
 
-#include "DispSync.h"
 #include "EventThread.h"
 #include "TracedOrdinal.h"
+#include "VSyncDispatch.h"
 
-namespace android {
+namespace android::scheduler {
+class CallbackRepeater;
 
-class DispSyncSource final : public VSyncSource, private DispSync::Callback {
+class DispSyncSource final : public VSyncSource {
 public:
-    DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync, const char* name);
+    DispSyncSource(VSyncDispatch& vSyncDispatch, std::chrono::nanoseconds workDuration,
+                   std::chrono::nanoseconds readyDuration, bool traceVsync, const char* name);
 
-    ~DispSyncSource() override = default;
+    ~DispSyncSource() override;
 
     // The following methods are implementation of VSyncSource.
     const char* getName() const override { return mName; }
     void setVSyncEnabled(bool enable) override;
     void setCallback(VSyncSource::Callback* callback) override;
-    void setPhaseOffset(nsecs_t phaseOffset) override;
+    void setDuration(std::chrono::nanoseconds workDuration,
+                     std::chrono::nanoseconds readyDuration) override;
 
     void dump(std::string&) const override;
 
 private:
-    // The following method is the implementation of the DispSync::Callback.
-    void onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) override;
+    void onVsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime);
 
     const char* const mName;
     TracedOrdinal<int> mValue;
 
     const bool mTraceVsync;
     const std::string mVsyncOnLabel;
-    nsecs_t mLastCallbackTime GUARDED_BY(mVsyncMutex) = 0;
 
-    DispSync* mDispSync;
+    std::unique_ptr<CallbackRepeater> mCallbackRepeater;
 
     std::mutex mCallbackMutex;
     VSyncSource::Callback* mCallback GUARDED_BY(mCallbackMutex) = nullptr;
 
     mutable std::mutex mVsyncMutex;
-    TracedOrdinal<nsecs_t> mPhaseOffset GUARDED_BY(mVsyncMutex);
+    TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mVsyncMutex);
+    std::chrono::nanoseconds mReadyDuration GUARDED_BY(mVsyncMutex);
     bool mEnabled GUARDED_BY(mVsyncMutex) = false;
 };
 
-} // namespace android
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 846190c..559626b 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -200,9 +200,10 @@
     mThread.join();
 }
 
-void EventThread::setPhaseOffset(nsecs_t phaseOffset) {
+void EventThread::setDuration(std::chrono::nanoseconds workDuration,
+                              std::chrono::nanoseconds readyDuration) {
     std::lock_guard<std::mutex> lock(mMutex);
-    mVSyncSource->setPhaseOffset(phaseOffset);
+    mVSyncSource->setDuration(workDuration, readyDuration);
 }
 
 sp<EventThreadConnection> EventThread::createEventConnection(
@@ -283,7 +284,8 @@
     mCondition.notify_all();
 }
 
-void EventThread::onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp) {
+void EventThread::onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp,
+                               nsecs_t /*deadlineTimestamp*/) {
     std::lock_guard<std::mutex> lock(mMutex);
 
     LOG_FATAL_IF(!mVSyncState);
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index 49f624c..fa1ca64 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -57,7 +57,8 @@
     class Callback {
     public:
         virtual ~Callback() {}
-        virtual void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) = 0;
+        virtual void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+                                  nsecs_t deadlineTimestamp) = 0;
     };
 
     virtual ~VSyncSource() {}
@@ -65,7 +66,8 @@
     virtual const char* getName() const = 0;
     virtual void setVSyncEnabled(bool enable) = 0;
     virtual void setCallback(Callback* callback) = 0;
-    virtual void setPhaseOffset(nsecs_t phaseOffset) = 0;
+    virtual void setDuration(std::chrono::nanoseconds workDuration,
+                             std::chrono::nanoseconds readyDuration) = 0;
 
     virtual void dump(std::string& result) const = 0;
 };
@@ -116,7 +118,8 @@
 
     virtual void dump(std::string& result) const = 0;
 
-    virtual void setPhaseOffset(nsecs_t phaseOffset) = 0;
+    virtual void setDuration(std::chrono::nanoseconds workDuration,
+                             std::chrono::nanoseconds readyDuration) = 0;
 
     virtual status_t registerDisplayEventConnection(
             const sp<EventThreadConnection>& connection) = 0;
@@ -157,7 +160,8 @@
 
     void dump(std::string& result) const override;
 
-    void setPhaseOffset(nsecs_t phaseOffset) override;
+    void setDuration(std::chrono::nanoseconds workDuration,
+                     std::chrono::nanoseconds readyDuration) override;
 
     size_t getEventThreadConnectionCount() override;
 
@@ -177,7 +181,8 @@
             REQUIRES(mMutex);
 
     // Implements VSyncSource::Callback
-    void onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp) override;
+    void onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp,
+                      nsecs_t deadlineTimestamp) override;
 
     const std::unique_ptr<VSyncSource> mVSyncSource GUARDED_BY(mMutex);
 
diff --git a/services/surfaceflinger/Scheduler/InjectVSyncSource.h b/services/surfaceflinger/Scheduler/InjectVSyncSource.h
index 975c9db..016b076 100644
--- a/services/surfaceflinger/Scheduler/InjectVSyncSource.h
+++ b/services/surfaceflinger/Scheduler/InjectVSyncSource.h
@@ -35,16 +35,17 @@
         mCallback = callback;
     }
 
-    void onInjectSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) {
+    void onInjectSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+                           nsecs_t deadlineTimestamp) {
         std::lock_guard<std::mutex> lock(mCallbackMutex);
         if (mCallback) {
-            mCallback->onVSyncEvent(when, expectedVSyncTimestamp);
+            mCallback->onVSyncEvent(when, expectedVSyncTimestamp, deadlineTimestamp);
         }
     }
 
     const char* getName() const override { return "inject"; }
     void setVSyncEnabled(bool) override {}
-    void setPhaseOffset(nsecs_t) override {}
+    void setDuration(std::chrono::nanoseconds, std::chrono::nanoseconds) override {}
     void dump(std::string&) const override {}
 
 private:
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index 5f7b2c2..017872c 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -184,16 +184,18 @@
     return *mVsyncSchedule.sync;
 }
 
-std::unique_ptr<VSyncSource> Scheduler::makePrimaryDispSyncSource(const char* name,
-                                                                  nsecs_t phaseOffsetNs) {
-    return std::make_unique<DispSyncSource>(&getPrimaryDispSync(), phaseOffsetNs,
-                                            true /* traceVsync */, name);
+std::unique_ptr<VSyncSource> Scheduler::makePrimaryDispSyncSource(
+        const char* name, std::chrono::nanoseconds workDuration,
+        std::chrono::nanoseconds readyDuration, bool traceVsync) {
+    return std::make_unique<scheduler::DispSyncSource>(*mVsyncSchedule.dispatch, workDuration,
+                                                       readyDuration, traceVsync, name);
 }
 
 Scheduler::ConnectionHandle Scheduler::createConnection(
-        const char* connectionName, nsecs_t phaseOffsetNs,
+        const char* connectionName, std::chrono::nanoseconds workDuration,
+        std::chrono::nanoseconds readyDuration,
         impl::EventThread::InterceptVSyncsCallback interceptCallback) {
-    auto vsyncSource = makePrimaryDispSyncSource(connectionName, phaseOffsetNs);
+    auto vsyncSource = makePrimaryDispSyncSource(connectionName, workDuration, readyDuration);
     auto eventThread = std::make_unique<impl::EventThread>(std::move(vsyncSource),
                                                            std::move(interceptCallback));
     return createConnection(std::move(eventThread));
@@ -286,9 +288,10 @@
     mConnections.at(handle).thread->dump(result);
 }
 
-void Scheduler::setPhaseOffset(ConnectionHandle handle, nsecs_t phaseOffset) {
+void Scheduler::setDuration(ConnectionHandle handle, std::chrono::nanoseconds workDuration,
+                            std::chrono::nanoseconds readyDuration) {
     RETURN_IF_INVALID_HANDLE(handle);
-    mConnections[handle].thread->setPhaseOffset(phaseOffset);
+    mConnections[handle].thread->setDuration(workDuration, readyDuration);
 }
 
 void Scheduler::getDisplayStatInfo(DisplayStatInfo* stats) {
@@ -318,12 +321,12 @@
     return mInjectorConnectionHandle;
 }
 
-bool Scheduler::injectVSync(nsecs_t when, nsecs_t expectedVSyncTime) {
+bool Scheduler::injectVSync(nsecs_t when, nsecs_t expectedVSyncTime, nsecs_t deadlineTimestamp) {
     if (!mInjectVSyncs || !mVSyncInjector) {
         return false;
     }
 
-    mVSyncInjector->onInjectSyncEvent(when, expectedVSyncTime);
+    mVSyncInjector->onInjectSyncEvent(when, expectedVSyncTime, deadlineTimestamp);
     return true;
 }
 
@@ -340,7 +343,6 @@
     std::lock_guard<std::mutex> lock(mHWVsyncLock);
     if (mPrimaryHWVsyncEnabled) {
         mSchedulerCallback.setVsyncEnabled(false);
-        getPrimaryDispSync().endResync();
         mPrimaryHWVsyncEnabled = false;
     }
     if (makeUnavailable) {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index ed68b86..44d0d77 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -71,7 +71,9 @@
     DispSync& getPrimaryDispSync();
 
     using ConnectionHandle = scheduler::ConnectionHandle;
-    ConnectionHandle createConnection(const char* connectionName, nsecs_t phaseOffsetNs,
+    ConnectionHandle createConnection(const char* connectionName,
+                                      std::chrono::nanoseconds workDuration,
+                                      std::chrono::nanoseconds readyDuration,
                                       impl::EventThread::InterceptVSyncsCallback);
 
     sp<IDisplayEventConnection> createDisplayEventConnection(ConnectionHandle,
@@ -88,17 +90,16 @@
     void onScreenAcquired(ConnectionHandle);
     void onScreenReleased(ConnectionHandle);
 
-    // Modifies phase offset in the event thread.
-    void setPhaseOffset(ConnectionHandle, nsecs_t phaseOffset);
+    // Modifies work duration in the event thread.
+    void setDuration(ConnectionHandle, std::chrono::nanoseconds workDuration,
+                     std::chrono::nanoseconds readyDuration);
 
     void getDisplayStatInfo(DisplayStatInfo* stats);
 
     // Returns injector handle if injection has toggled, or an invalid handle otherwise.
     ConnectionHandle enableVSyncInjection(bool enable);
-
     // Returns false if injection is disabled.
-    bool injectVSync(nsecs_t when, nsecs_t expectedVSyncTime);
-
+    bool injectVSync(nsecs_t when, nsecs_t expectedVSyncTime, nsecs_t deadlineTimestamp);
     void enableHardwareVsync();
     void disableHardwareVsync(bool makeUnavailable);
 
@@ -151,6 +152,11 @@
 
     size_t getEventThreadConnectionCount(ConnectionHandle handle);
 
+    std::unique_ptr<VSyncSource> makePrimaryDispSyncSource(const char* name,
+                                                           std::chrono::nanoseconds workDuration,
+                                                           std::chrono::nanoseconds readyDuration,
+                                                           bool traceVsync = true);
+
 private:
     friend class TestableScheduler;
 
@@ -186,8 +192,6 @@
     static std::unique_ptr<LayerHistory> createLayerHistory(const scheduler::RefreshRateConfigs&,
                                                             bool useContentDetectionV2);
 
-    std::unique_ptr<VSyncSource> makePrimaryDispSyncSource(const char* name, nsecs_t phaseOffsetNs);
-
     // Create a connection on the given EventThread.
     ConnectionHandle createConnection(std::unique_ptr<EventThread>);
     sp<EventThreadConnection> createConnectionInternal(EventThread*,
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatch.h b/services/surfaceflinger/Scheduler/VSyncDispatch.h
index 2a2d7c5..0407900 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatch.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatch.h
@@ -40,11 +40,13 @@
 
     /*
      * A callback that can be registered to be awoken at a given time relative to a vsync event.
-     * \param [in] vsyncTime The timestamp of the vsync the callback is for.
-     * \param [in] targetWakeupTime The timestamp of intended wakeup time of the cb.
-     *
+     * \param [in] vsyncTime:        The timestamp of the vsync the callback is for.
+     * \param [in] targetWakeupTime: The timestamp of the intended wakeup time of the callback.
+     * \param [in] readyTime:        The timestamp by which the client needs to have finished
+     *                               its work.
      */
-    using Callback = std::function<void(nsecs_t vsyncTime, nsecs_t targetWakeupTime)>;
+    using Callback =
+            std::function<void(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime)>;
 
     /*
      * Registers a callback that will be called at designated points on the vsync timeline.
@@ -71,33 +73,54 @@
     virtual void unregisterCallback(CallbackToken token) = 0;
 
     /*
+     * Timing information about a scheduled callback
+     *
+     * @workDuration:  The time needed for the client to perform its work
+     * @readyDuration: The time needed for the client to be ready before a vsync event.
+     *                 For external (non-SF) clients, not only do we need to account for their
+     *                 workDuration, but we also need to account for the time SF will take to
+     *                 process their buffer/transaction. In this case, readyDuration will be set
+     *                 to the SF duration in order to provide enough end-to-end time, and to be
+     *                 able to provide the ready-by time (deadline) on the callback.
+     *                 For internal clients, we don't need to add additional padding, so
+     *                 readyDuration will typically be 0.
+     * @earliestVsync: The targeted display time. This will be snapped to the closest
+     *                 predicted vsync time after earliestVsync.
+     *
+     * callback will be dispatched at 'workDuration + readyDuration' nanoseconds before a vsync
+     * event.
+     */
+    struct ScheduleTiming {
+        nsecs_t workDuration = 0;
+        nsecs_t readyDuration = 0;
+        nsecs_t earliestVsync = 0;
+    };
+
+    /*
      * Schedules the registered callback to be dispatched.
      *
-     * The callback will be dispatched at 'workDuration' nanoseconds before a vsync event.
+     * The callback will be dispatched at 'workDuration + readyDuration' nanoseconds before a vsync
+     * event.
      *
      * The caller designates the earliest vsync event that should be targeted by the earliestVsync
      * parameter.
-     * The callback will be scheduled at (workDuration - predictedVsync), where predictedVsync
-     * is the first vsync event time where ( predictedVsync >= earliestVsync ).
+     * The callback will be scheduled at (workDuration + readyDuration - predictedVsync), where
+     * predictedVsync is the first vsync event time where ( predictedVsync >= earliestVsync ).
      *
-     * If (workDuration - earliestVsync) is in the past, or if a callback has already been
-     * dispatched for the predictedVsync, an error will be returned.
+     * If (workDuration + readyDuration - earliestVsync) is in the past, or if a callback has
+     * already been dispatched for the predictedVsync, an error will be returned.
      *
      * It is valid to reschedule a callback to a different time.
      *
      * \param [in] token           The callback to schedule.
-     * \param [in] workDuration    The time before the actual vsync time to invoke the callback
-     *                             associated with token.
-     * \param [in] earliestVsync   The targeted display time. This will be snapped to the closest
-     *                             predicted vsync time after earliestVsync.
+     * \param [in] scheduleTiming  The timing information for this schedule call
      * \return                     A ScheduleResult::Scheduled if callback was scheduled.
      *                             A ScheduleResult::CannotSchedule
-     *                             if (workDuration - earliestVsync) is in the past, or
-     *                             if a callback was dispatched for the predictedVsync already.
-     *                             A ScheduleResult::Error if there was another error.
+     *                             if (workDuration + readyDuration - earliestVsync) is in the past,
+     *                             or if a callback was dispatched for the predictedVsync already.
+     *                             A ScheduleResult::Error if there was another error.
      */
-    virtual ScheduleResult schedule(CallbackToken token, nsecs_t workDuration,
-                                    nsecs_t earliestVsync) = 0;
+    virtual ScheduleResult schedule(CallbackToken token, ScheduleTiming scheduleTiming) = 0;
 
     /* Cancels a scheduled callback, if possible.
      *
@@ -129,7 +152,7 @@
     ~VSyncCallbackRegistration();
 
     // See documentation for VSyncDispatch::schedule.
-    ScheduleResult schedule(nsecs_t workDuration, nsecs_t earliestVsync);
+    ScheduleResult schedule(VSyncDispatch::ScheduleTiming scheduleTiming);
 
     // See documentation for VSyncDispatch::cancel.
     CancelResult cancel();
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index ef92680..2154a40 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -35,8 +35,6 @@
                                                            nsecs_t minVsyncDistance)
       : mName(name),
         mCallback(cb),
-        mWorkDuration(0),
-        mEarliestVsync(0),
         mMinVsyncDistance(minVsyncDistance) {}
 
 std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::lastExecutedVsyncTarget() const {
@@ -54,6 +52,13 @@
     return {mArmedInfo->mActualWakeupTime};
 }
 
+std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::readyTime() const {
+    if (!mArmedInfo) {
+        return {};
+    }
+    return {mArmedInfo->mActualReadyTime};
+}
+
 std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::targetVsync() const {
     if (!mArmedInfo) {
         return {};
@@ -61,10 +66,10 @@
     return {mArmedInfo->mActualVsyncTime};
 }
 
-ScheduleResult VSyncDispatchTimerQueueEntry::schedule(nsecs_t workDuration, nsecs_t earliestVsync,
+ScheduleResult VSyncDispatchTimerQueueEntry::schedule(VSyncDispatch::ScheduleTiming timing,
                                                       VSyncTracker& tracker, nsecs_t now) {
-    auto nextVsyncTime =
-            tracker.nextAnticipatedVSyncTimeFrom(std::max(earliestVsync, now + workDuration));
+    auto nextVsyncTime = tracker.nextAnticipatedVSyncTimeFrom(
+            std::max(timing.earliestVsync, now + timing.workDuration + timing.readyDuration));
 
     bool const wouldSkipAVsyncTarget =
             mArmedInfo && (nextVsyncTime > (mArmedInfo->mActualVsyncTime + mMinVsyncDistance));
@@ -80,16 +85,15 @@
                 tracker.nextAnticipatedVSyncTimeFrom(*mLastDispatchTime + mMinVsyncDistance);
     }
 
-    auto const nextWakeupTime = nextVsyncTime - workDuration;
-    mWorkDuration = workDuration;
-    mEarliestVsync = earliestVsync;
-    mArmedInfo = {nextWakeupTime, nextVsyncTime};
+    auto const nextWakeupTime = nextVsyncTime - timing.workDuration - timing.readyDuration;
+    auto const nextReadyTime = nextVsyncTime - timing.readyDuration;
+    mScheduleTiming = timing;
+    mArmedInfo = {nextWakeupTime, nextVsyncTime, nextReadyTime};
     return ScheduleResult::Scheduled;
 }
 
-void VSyncDispatchTimerQueueEntry::addPendingWorkloadUpdate(nsecs_t workDuration,
-                                                            nsecs_t earliestVsync) {
-    mWorkloadUpdateInfo = {.earliestVsync = earliestVsync, .duration = workDuration};
+void VSyncDispatchTimerQueueEntry::addPendingWorkloadUpdate(VSyncDispatch::ScheduleTiming timing) {
+    mWorkloadUpdateInfo = timing;
 }
 
 bool VSyncDispatchTimerQueueEntry::hasPendingWorkloadUpdate() const {
@@ -102,14 +106,18 @@
     }
 
     if (mWorkloadUpdateInfo) {
-        mEarliestVsync = mWorkloadUpdateInfo->earliestVsync;
-        mWorkDuration = mWorkloadUpdateInfo->duration;
+        mScheduleTiming = *mWorkloadUpdateInfo;
         mWorkloadUpdateInfo.reset();
     }
 
-    auto const nextVsyncTime =
-            tracker.nextAnticipatedVSyncTimeFrom(std::max(mEarliestVsync, now + mWorkDuration));
-    mArmedInfo = {nextVsyncTime - mWorkDuration, nextVsyncTime};
+    const auto earliestReadyBy = now + mScheduleTiming.workDuration + mScheduleTiming.readyDuration;
+    const auto earliestVsync = std::max(earliestReadyBy, mScheduleTiming.earliestVsync);
+
+    const auto nextVsyncTime = tracker.nextAnticipatedVSyncTimeFrom(earliestVsync);
+    const auto nextReadyTime = nextVsyncTime - mScheduleTiming.readyDuration;
+    const auto nextWakeupTime = nextReadyTime - mScheduleTiming.workDuration;
+
+    mArmedInfo = {nextWakeupTime, nextVsyncTime, nextReadyTime};
 }
 
 void VSyncDispatchTimerQueueEntry::disarm() {
@@ -122,13 +130,14 @@
     return *mLastDispatchTime;
 }
 
-void VSyncDispatchTimerQueueEntry::callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp) {
+void VSyncDispatchTimerQueueEntry::callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp,
+                                            nsecs_t deadlineTimestamp) {
     {
         std::lock_guard<std::mutex> lk(mRunningMutex);
         mRunning = true;
     }
 
-    mCallback(vsyncTimestamp, wakeupTimestamp);
+    mCallback(vsyncTimestamp, wakeupTimestamp, deadlineTimestamp);
 
     std::lock_guard<std::mutex> lk(mRunningMutex);
     mRunning = false;
@@ -144,15 +153,20 @@
     std::lock_guard<std::mutex> lk(mRunningMutex);
     std::string armedInfo;
     if (mArmedInfo) {
-        StringAppendF(&armedInfo, "[wake up in %.2fms for vsync %.2fms from now]",
+        StringAppendF(&armedInfo,
+                      "[wake up in %.2fms deadline in %.2fms for vsync %.2fms from now]",
                       (mArmedInfo->mActualWakeupTime - systemTime()) / 1e6f,
+                      (mArmedInfo->mActualReadyTime - systemTime()) / 1e6f,
                       (mArmedInfo->mActualVsyncTime - systemTime()) / 1e6f);
     }
 
     StringAppendF(&result, "\t\t%s: %s %s\n", mName.c_str(),
                   mRunning ? "(in callback function)" : "", armedInfo.c_str());
-    StringAppendF(&result, "\t\t\tmWorkDuration: %.2fms mEarliestVsync: %.2fms relative to now\n",
-                  mWorkDuration / 1e6f, (mEarliestVsync - systemTime()) / 1e6f);
+    StringAppendF(&result,
+                  "\t\t\tworkDuration: %.2fms readyDuration: %.2fms earliestVsync: %.2fms relative "
+                  "to now\n",
+                  mScheduleTiming.workDuration / 1e6f, mScheduleTiming.readyDuration / 1e6f,
+                  (mScheduleTiming.earliestVsync - systemTime()) / 1e6f);
 
     if (mLastDispatchTime) {
         StringAppendF(&result, "\t\t\tmLastDispatchTime: %.2fms ago\n",
@@ -239,6 +253,7 @@
         std::shared_ptr<VSyncDispatchTimerQueueEntry> callback;
         nsecs_t vsyncTimestamp;
         nsecs_t wakeupTimestamp;
+        nsecs_t deadlineTimestamp;
     };
     std::vector<Invocation> invocations;
     {
@@ -252,11 +267,13 @@
                 continue;
             }
 
+            auto const readyTime = callback->readyTime();
+
             auto const lagAllowance = std::max(now - mIntendedWakeupTime, static_cast<nsecs_t>(0));
             if (*wakeupTime < mIntendedWakeupTime + mTimerSlack + lagAllowance) {
                 callback->executing();
-                invocations.emplace_back(
-                        Invocation{callback, *callback->lastExecutedVsyncTarget(), *wakeupTime});
+                invocations.emplace_back(Invocation{callback, *callback->lastExecutedVsyncTarget(),
+                                                    *wakeupTime, *readyTime});
             }
         }
 
@@ -265,7 +282,8 @@
     }
 
     for (auto const& invocation : invocations) {
-        invocation.callback->callback(invocation.vsyncTimestamp, invocation.wakeupTimestamp);
+        invocation.callback->callback(invocation.vsyncTimestamp, invocation.wakeupTimestamp,
+                                      invocation.deadlineTimestamp);
     }
 }
 
@@ -297,8 +315,8 @@
     }
 }
 
-ScheduleResult VSyncDispatchTimerQueue::schedule(CallbackToken token, nsecs_t workDuration,
-                                                 nsecs_t earliestVsync) {
+ScheduleResult VSyncDispatchTimerQueue::schedule(CallbackToken token,
+                                                 ScheduleTiming scheduleTiming) {
     auto result = ScheduleResult::Error;
     {
         std::lock_guard<decltype(mMutex)> lk(mMutex);
@@ -314,11 +332,11 @@
          * timer recalculation to avoid cancelling a callback that is about to fire. */
         auto const rearmImminent = now > mIntendedWakeupTime;
         if (CC_UNLIKELY(rearmImminent)) {
-            callback->addPendingWorkloadUpdate(workDuration, earliestVsync);
+            callback->addPendingWorkloadUpdate(scheduleTiming);
             return ScheduleResult::Scheduled;
         }
 
-        result = callback->schedule(workDuration, earliestVsync, mTracker, now);
+        result = callback->schedule(scheduleTiming, mTracker, now);
         if (result == ScheduleResult::CannotSchedule) {
             return result;
         }
@@ -396,11 +414,11 @@
     if (mValidToken) mDispatch.get().unregisterCallback(mToken);
 }
 
-ScheduleResult VSyncCallbackRegistration::schedule(nsecs_t workDuration, nsecs_t earliestVsync) {
+ScheduleResult VSyncCallbackRegistration::schedule(VSyncDispatch::ScheduleTiming scheduleTiming) {
     if (!mValidToken) {
         return ScheduleResult::Error;
     }
-    return mDispatch.get().schedule(mToken, workDuration, earliestVsync);
+    return mDispatch.get().schedule(mToken, scheduleTiming);
 }
 
 CancelResult VSyncCallbackRegistration::cancel() {
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
index 957c0d1..26237b6 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
@@ -47,7 +47,7 @@
     std::optional<nsecs_t> lastExecutedVsyncTarget() const;
 
     // This moves the state from disarmed->armed and will calculate the wakeupTime.
-    ScheduleResult schedule(nsecs_t workDuration, nsecs_t earliestVsync, VSyncTracker& tracker,
+    ScheduleResult schedule(VSyncDispatch::ScheduleTiming timing, VSyncTracker& tracker,
                             nsecs_t now);
     // This will update armed entries with the latest vsync information. Entry remains armed.
     void update(VSyncTracker& tracker, nsecs_t now);
@@ -56,6 +56,8 @@
     // It will not update the wakeupTime.
     std::optional<nsecs_t> wakeupTime() const;
 
+    std::optional<nsecs_t> readyTime() const;
+
     std::optional<nsecs_t> targetVsync() const;
 
     // This moves state from armed->disarmed.
@@ -67,14 +69,14 @@
 
     // Adds a pending upload of the earliestVSync and workDuration that will be applied on the next
     // call to update()
-    void addPendingWorkloadUpdate(nsecs_t workDuration, nsecs_t earliestVsync);
+    void addPendingWorkloadUpdate(VSyncDispatch::ScheduleTiming);
 
     // Checks if there is a pending update to the workload, returning true if so.
     bool hasPendingWorkloadUpdate() const;
     // End: functions that are not threadsafe.
 
     // Invoke the callback with the two given timestamps, moving the state from running->disarmed.
-    void callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp);
+    void callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp, nsecs_t deadlineTimestamp);
     // Block calling thread while the callback is executing.
     void ensureNotRunning();
 
@@ -84,22 +86,18 @@
     std::string const mName;
     VSyncDispatch::Callback const mCallback;
 
-    nsecs_t mWorkDuration;
-    nsecs_t mEarliestVsync;
+    VSyncDispatch::ScheduleTiming mScheduleTiming;
     nsecs_t const mMinVsyncDistance;
 
     struct ArmingInfo {
         nsecs_t mActualWakeupTime;
         nsecs_t mActualVsyncTime;
+        nsecs_t mActualReadyTime;
     };
     std::optional<ArmingInfo> mArmedInfo;
     std::optional<nsecs_t> mLastDispatchTime;
 
-    struct WorkloadUpdateInfo {
-        nsecs_t duration;
-        nsecs_t earliestVsync;
-    };
-    std::optional<WorkloadUpdateInfo> mWorkloadUpdateInfo;
+    std::optional<VSyncDispatch::ScheduleTiming> mWorkloadUpdateInfo;
 
     mutable std::mutex mRunningMutex;
     std::condition_variable mCv;
@@ -125,7 +123,7 @@
 
     CallbackToken registerCallback(Callback const& callbackFn, std::string callbackName) final;
     void unregisterCallback(CallbackToken token) final;
-    ScheduleResult schedule(CallbackToken token, nsecs_t workDuration, nsecs_t earliestVsync) final;
+    ScheduleResult schedule(CallbackToken token, ScheduleTiming scheduleTiming) final;
     CancelResult cancel(CallbackToken token) final;
     void dump(std::string& result) const final;
 
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index 61f3fbb..e90edf7 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -14,10 +14,6 @@
  * limitations under the License.
  */
 
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wconversion"
-
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 #include "VSyncPredictor.h"
@@ -54,7 +50,7 @@
     }
 }
 
-inline size_t VSyncPredictor::next(int i) const {
+inline size_t VSyncPredictor::next(size_t i) const {
     return (i + 1) % mTimestamps.size();
 }
 
@@ -69,12 +65,12 @@
 }
 
 nsecs_t VSyncPredictor::currentPeriod() const {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     return std::get<0>(mRateMap.find(mIdealPeriod)->second);
 }
 
 bool VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
 
     if (!validate(timestamp)) {
         // VSR could elect to ignore the incongruent timestamp or resetModel(). If ts is ignored,
@@ -138,14 +134,14 @@
 
     auto meanTS = scheduler::calculate_mean(vsyncTS);
     auto meanOrdinal = scheduler::calculate_mean(ordinals);
-    for (auto i = 0; i < vsyncTS.size(); i++) {
+    for (size_t i = 0; i < vsyncTS.size(); i++) {
         vsyncTS[i] -= meanTS;
         ordinals[i] -= meanOrdinal;
     }
 
     auto top = 0ll;
     auto bottom = 0ll;
-    for (auto i = 0; i < vsyncTS.size(); i++) {
+    for (size_t i = 0; i < vsyncTS.size(); i++) {
         top += vsyncTS[i] * ordinals[i];
         bottom += ordinals[i] * ordinals[i];
     }
@@ -177,9 +173,9 @@
 }
 
 nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
 
-    auto const [slope, intercept] = getVSyncPredictionModel(lk);
+    auto const [slope, intercept] = getVSyncPredictionModel(lock);
 
     if (mTimestamps.empty()) {
         traceInt64If("VSP-mode", 1);
@@ -215,8 +211,8 @@
 }
 
 std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel() const {
-    std::lock_guard<std::mutex> lk(mMutex);
-    return VSyncPredictor::getVSyncPredictionModel(lk);
+    std::lock_guard lock(mMutex);
+    return VSyncPredictor::getVSyncPredictionModel(lock);
 }
 
 std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel(
@@ -227,7 +223,7 @@
 void VSyncPredictor::setPeriod(nsecs_t period) {
     ATRACE_CALL();
 
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     static constexpr size_t kSizeLimit = 30;
     if (CC_UNLIKELY(mRateMap.size() == kSizeLimit)) {
         mRateMap.erase(mRateMap.begin());
@@ -256,18 +252,18 @@
 }
 
 bool VSyncPredictor::needsMoreSamples() const {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     return mTimestamps.size() < kMinimumSamplesForPrediction;
 }
 
 void VSyncPredictor::resetModel() {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     mRateMap[mIdealPeriod] = {mIdealPeriod, 0};
     clearTimestamps();
 }
 
 void VSyncPredictor::dump(std::string& result) const {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     StringAppendF(&result, "\tmIdealPeriod=%.2f\n", mIdealPeriod / 1e6f);
     StringAppendF(&result, "\tRefresh Rate Map:\n");
     for (const auto& [idealPeriod, periodInterceptTuple] : mRateMap) {
@@ -280,5 +276,3 @@
 
 } // namespace android::scheduler
 
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic pop // ignored "-Wconversion"
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 5f3c418..5f2ec49 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -74,7 +74,7 @@
     size_t const kOutlierTolerancePercent;
 
     std::mutex mutable mMutex;
-    size_t next(int i) const REQUIRES(mMutex);
+    size_t next(size_t i) const REQUIRES(mMutex);
     bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
     std::tuple<nsecs_t, nsecs_t> getVSyncPredictionModel(std::lock_guard<std::mutex> const&) const
             REQUIRES(mMutex);
@@ -84,7 +84,7 @@
 
     std::unordered_map<nsecs_t, std::tuple<nsecs_t, nsecs_t>> mutable mRateMap GUARDED_BY(mMutex);
 
-    int mLastTimestampIndex GUARDED_BY(mMutex) = 0;
+    size_t mLastTimestampIndex GUARDED_BY(mMutex) = 0;
     std::vector<nsecs_t> mTimestamps GUARDED_BY(mMutex);
 };
 
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index a2b279b..792313e 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -38,20 +38,20 @@
 class PredictedVsyncTracer {
 public:
     PredictedVsyncTracer(VSyncDispatch& dispatch)
-          : mRegistration(dispatch,
-                          std::bind(&PredictedVsyncTracer::callback, this, std::placeholders::_1,
-                                    std::placeholders::_2),
+          : mRegistration(dispatch, std::bind(&PredictedVsyncTracer::callback, this),
                           "PredictedVsyncTracer") {
-        mRegistration.schedule(0, 0);
+        scheduleRegistration();
     }
 
 private:
     TracedOrdinal<bool> mParity = {"VSYNC-predicted", 0};
     VSyncCallbackRegistration mRegistration;
 
-    void callback(nsecs_t /*vsyncTime*/, nsecs_t /*targetWakeupTim*/) {
+    void scheduleRegistration() { mRegistration.schedule({0, 0, 0}); }
+
+    void callback() {
         mParity = !mParity;
-        mRegistration.schedule(0, 0);
+        scheduleRegistration();
     }
 };
 
@@ -69,96 +69,6 @@
 
 VSyncReactor::~VSyncReactor() = default;
 
-// The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
-// VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
-// for now.
-class CallbackRepeater {
-public:
-    CallbackRepeater(VSyncDispatch& dispatch, DispSync::Callback* cb, const char* name,
-                     nsecs_t period, nsecs_t offset, nsecs_t notBefore)
-          : mName(name),
-            mCallback(cb),
-            mRegistration(dispatch,
-                          std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
-                                    std::placeholders::_2),
-                          mName),
-            mPeriod(period),
-            mOffset(offset),
-            mLastCallTime(notBefore) {}
-
-    ~CallbackRepeater() {
-        std::lock_guard<std::mutex> lk(mMutex);
-        mRegistration.cancel();
-    }
-
-    void start(nsecs_t offset) {
-        std::lock_guard<std::mutex> lk(mMutex);
-        mStopped = false;
-        mOffset = offset;
-
-        auto const schedule_result = mRegistration.schedule(calculateWorkload(), mLastCallTime);
-        LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
-                            "Error scheduling callback: rc %X", schedule_result);
-    }
-
-    void setPeriod(nsecs_t period) {
-        std::lock_guard<std::mutex> lk(mMutex);
-        if (period == mPeriod) {
-            return;
-        }
-        mPeriod = period;
-    }
-
-    void stop() {
-        std::lock_guard<std::mutex> lk(mMutex);
-        LOG_ALWAYS_FATAL_IF(mStopped, "DispSyncInterface misuse: callback already stopped");
-        mStopped = true;
-        mRegistration.cancel();
-    }
-
-    void dump(std::string& result) const {
-        std::lock_guard<std::mutex> lk(mMutex);
-        StringAppendF(&result, "\t%s: mPeriod=%.2f last vsync time %.2fms relative to now (%s)\n",
-                      mName.c_str(), mPeriod / 1e6f, (mLastCallTime - systemTime()) / 1e6f,
-                      mStopped ? "stopped" : "running");
-    }
-
-private:
-    void callback(nsecs_t vsynctime, nsecs_t wakeupTime) {
-        {
-            std::lock_guard<std::mutex> lk(mMutex);
-            mLastCallTime = vsynctime;
-        }
-
-        mCallback->onDispSyncEvent(wakeupTime, vsynctime);
-
-        {
-            std::lock_guard<std::mutex> lk(mMutex);
-            if (mStopped) {
-                return;
-            }
-            auto const schedule_result = mRegistration.schedule(calculateWorkload(), vsynctime);
-            LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
-                                "Error rescheduling callback: rc %X", schedule_result);
-        }
-    }
-
-    // DispSync offsets are defined as time after the vsync before presentation.
-    // VSyncReactor workloads are defined as time before the intended presentation vsync.
-    // Note change in sign between the two defnitions.
-    nsecs_t calculateWorkload() REQUIRES(mMutex) { return mPeriod - mOffset; }
-
-    const std::string mName;
-    DispSync::Callback* const mCallback;
-
-    std::mutex mutable mMutex;
-    VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
-    bool mStopped GUARDED_BY(mMutex) = false;
-    nsecs_t mPeriod GUARDED_BY(mMutex);
-    nsecs_t mOffset GUARDED_BY(mMutex);
-    nsecs_t mLastCallTime GUARDED_BY(mMutex);
-};
-
 bool VSyncReactor::addPresentFence(const std::shared_ptr<FenceTime>& fence) {
     if (!fence) {
         return false;
@@ -169,7 +79,7 @@
         return true;
     }
 
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     if (mExternalIgnoreFences || mInternalIgnoreFences) {
         return true;
     }
@@ -207,7 +117,7 @@
 }
 
 void VSyncReactor::setIgnorePresentFences(bool ignoration) {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     mExternalIgnoreFences = ignoration;
     updateIgnorePresentFencesInternal();
 }
@@ -269,8 +179,6 @@
     mTracker.resetModel();
 }
 
-void VSyncReactor::endResync() {}
-
 bool VSyncReactor::periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> HwcVsyncPeriod) {
     if (!mPeriodConfirmationInProgress) {
         return false;
@@ -303,14 +211,11 @@
                                    bool* periodFlushed) {
     assert(periodFlushed);
 
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
         ATRACE_NAME("VSR: period confirmed");
         if (mPeriodTransitioningTo) {
             mTracker.setPeriod(*mPeriodTransitioningTo);
-            for (auto& entry : mCallbacks) {
-                entry.second->setPeriod(*mPeriodTransitioningTo);
-            }
             *periodFlushed = true;
         }
 
@@ -339,51 +244,8 @@
     return mMoreSamplesNeeded;
 }
 
-status_t VSyncReactor::addEventListener(const char* name, nsecs_t phase,
-                                        DispSync::Callback* callback,
-                                        nsecs_t /* lastCallbackTime */) {
-    std::lock_guard<std::mutex> lk(mMutex);
-    auto it = mCallbacks.find(callback);
-    if (it == mCallbacks.end()) {
-        // TODO (b/146557561): resolve lastCallbackTime semantics in DispSync i/f.
-        static auto constexpr maxListeners = 4;
-        if (mCallbacks.size() >= maxListeners) {
-            ALOGE("callback %s not added, exceeded callback limit of %i (currently %zu)", name,
-                  maxListeners, mCallbacks.size());
-            return NO_MEMORY;
-        }
-
-        auto const period = mTracker.currentPeriod();
-        auto repeater = std::make_unique<CallbackRepeater>(mDispatch, callback, name, period, phase,
-                                                           mClock->now());
-        it = mCallbacks.emplace(std::pair(callback, std::move(repeater))).first;
-    }
-
-    it->second->start(phase);
-    return NO_ERROR;
-}
-
-status_t VSyncReactor::removeEventListener(DispSync::Callback* callback,
-                                           nsecs_t* /* outLastCallback */) {
-    std::lock_guard<std::mutex> lk(mMutex);
-    auto const it = mCallbacks.find(callback);
-    LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback %p not registered", callback);
-
-    it->second->stop();
-    return NO_ERROR;
-}
-
-status_t VSyncReactor::changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
-    std::lock_guard<std::mutex> lk(mMutex);
-    auto const it = mCallbacks.find(callback);
-    LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback was %p not registered", callback);
-
-    it->second->start(phase);
-    return NO_ERROR;
-}
-
 void VSyncReactor::dump(std::string& result) const {
-    std::lock_guard<std::mutex> lk(mMutex);
+    std::lock_guard lock(mMutex);
     StringAppendF(&result, "VsyncReactor in use\n");
     StringAppendF(&result, "Has %zu unfired fences\n", mUnfiredFences.size());
     StringAppendF(&result, "mInternalIgnoreFences=%d mExternalIgnoreFences=%d\n",
@@ -403,17 +265,10 @@
         StringAppendF(&result, "No Last HW vsync\n");
     }
 
-    StringAppendF(&result, "CallbackRepeaters:\n");
-    for (const auto& [callback, repeater] : mCallbacks) {
-        repeater->dump(result);
-    }
-
     StringAppendF(&result, "VSyncTracker:\n");
     mTracker.dump(result);
     StringAppendF(&result, "VSyncDispatch:\n");
     mDispatch.dump(result);
 }
 
-void VSyncReactor::reset() {}
-
 } // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.h b/services/surfaceflinger/Scheduler/VSyncReactor.h
index 22ceb39..c6f2777 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.h
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.h
@@ -29,7 +29,6 @@
 class Clock;
 class VSyncDispatch;
 class VSyncTracker;
-class CallbackRepeater;
 class PredictedVsyncTracer;
 
 // TODO (b/145217110): consider renaming.
@@ -52,15 +51,8 @@
     void beginResync() final;
     bool addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
                          bool* periodFlushed) final;
-    void endResync() final;
-
-    status_t addEventListener(const char* name, nsecs_t phase, DispSync::Callback* callback,
-                              nsecs_t lastCallbackTime) final;
-    status_t removeEventListener(DispSync::Callback* callback, nsecs_t* outLastCallback) final;
-    status_t changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) final;
 
     void dump(std::string& result) const final;
-    void reset() final;
 
 private:
     void setIgnorePresentFencesInternal(bool ignoration) REQUIRES(mMutex);
@@ -85,9 +77,6 @@
     std::optional<nsecs_t> mPeriodTransitioningTo GUARDED_BY(mMutex);
     std::optional<nsecs_t> mLastHwVsync GUARDED_BY(mMutex);
 
-    std::unordered_map<DispSync::Callback*, std::unique_ptr<CallbackRepeater>> mCallbacks
-            GUARDED_BY(mMutex);
-
     const std::unique_ptr<PredictedVsyncTracer> mPredictedVsyncTracer;
     const bool mSupportKernelIdleTimer = false;
 };