SurfaceFlinger: use duration instead of offset
This change uses the durations given to sf/app without
converting them back to legacy offsets. This allows us to get
the expected vsync associated with a wakeup event.
This change also required some refactoring:
- Move the periodic callback interface out of DispSync and
  into DispSyncSource
- Translate legacy offsets to durations in case a duration
  is not specified in the build files
- Add support to VSD to expose the deadline timestamp by
  which a frame needs to be ready
Bug: 162888874
Test: SF unit tests
Test: examine systraces
Change-Id: I87a53cc7dea931d3c195eab6842e003ca4516885
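For orientation, a minimal C++ sketch of the timing model this change moves to. It is not part of the patch: planWakeup and offsetToWorkDuration are illustrative names, and only the ScheduleTiming fields come from the change below. The dispatcher wakes a client workDuration + readyDuration before the predicted vsync and reports the ready-by deadline alongside it; a legacy phase offset converts to a work duration with a sign flip.

    #include <cstdint>
    using nsecs_t = int64_t;

    // Mirrors VSyncDispatch::ScheduleTiming introduced in this change.
    struct ScheduleTiming {
        nsecs_t workDuration = 0;   // time the client needs to perform its work
        nsecs_t readyDuration = 0;  // extra time needed before the vsync (e.g. SF's own work)
        nsecs_t earliestVsync = 0;  // earliest display time to target
    };

    // Given a predicted vsync, wake the client early enough for both durations and
    // report the ready-by deadline together with the expected vsync.
    struct Wakeup {
        nsecs_t wakeupTime;  // when the callback fires
        nsecs_t readyTime;   // deadline by which the frame must be ready
        nsecs_t vsyncTime;   // the vsync the work targets
    };

    Wakeup planWakeup(nsecs_t predictedVsync, const ScheduleTiming& t) {
        return {predictedVsync - t.workDuration - t.readyDuration,
                predictedVsync - t.readyDuration,
                predictedVsync};
    }

    // A legacy phase offset (time after the previous vsync) converts to a work
    // duration (time before the targeted vsync); note the sign flip.
    nsecs_t offsetToWorkDuration(nsecs_t period, nsecs_t offset) {
        return period - offset;
    }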
diff --git a/services/surfaceflinger/RegionSamplingThread.cpp b/services/surfaceflinger/RegionSamplingThread.cpp
index 0157a7f..b24855d 100644
--- a/services/surfaceflinger/RegionSamplingThread.cpp
+++ b/services/surfaceflinger/RegionSamplingThread.cpp
@@ -61,7 +61,7 @@
constexpr auto timeForRegionSampling = 5000000ns;
constexpr auto maxRegionSamplingSkips = 10;
-constexpr auto defaultRegionSamplingOffset = -3ms;
+constexpr auto defaultRegionSamplingWorkDuration = 3ms;
constexpr auto defaultRegionSamplingPeriod = 100ms;
constexpr auto defaultRegionSamplingTimerTimeout = 100ms;
// TODO: (b/127403193) duration to string conversion could probably be constexpr
@@ -73,9 +73,9 @@
RegionSamplingThread::EnvironmentTimingTunables::EnvironmentTimingTunables() {
char value[PROPERTY_VALUE_MAX] = {};
- property_get("debug.sf.region_sampling_offset_ns", value,
- toNsString(defaultRegionSamplingOffset).c_str());
- int const samplingOffsetNsRaw = atoi(value);
+ property_get("debug.sf.region_sampling_duration_ns", value,
+ toNsString(defaultRegionSamplingWorkDuration).c_str());
+ int const samplingDurationNsRaw = atoi(value);
property_get("debug.sf.region_sampling_period_ns", value,
toNsString(defaultRegionSamplingPeriod).c_str());
@@ -87,22 +87,26 @@
if ((samplingPeriodNsRaw < 0) || (samplingTimerTimeoutNsRaw < 0)) {
ALOGW("User-specified sampling tuning options nonsensical. Using defaults");
- mSamplingOffset = defaultRegionSamplingOffset;
+ mSamplingDuration = defaultRegionSamplingWorkDuration;
mSamplingPeriod = defaultRegionSamplingPeriod;
mSamplingTimerTimeout = defaultRegionSamplingTimerTimeout;
} else {
- mSamplingOffset = std::chrono::nanoseconds(samplingOffsetNsRaw);
+ mSamplingDuration = std::chrono::nanoseconds(samplingDurationNsRaw);
mSamplingPeriod = std::chrono::nanoseconds(samplingPeriodNsRaw);
mSamplingTimerTimeout = std::chrono::nanoseconds(samplingTimerTimeoutNsRaw);
}
}
-struct SamplingOffsetCallback : DispSync::Callback {
+struct SamplingOffsetCallback : VSyncSource::Callback {
SamplingOffsetCallback(RegionSamplingThread& samplingThread, Scheduler& scheduler,
- std::chrono::nanoseconds targetSamplingOffset)
+ std::chrono::nanoseconds targetSamplingWorkDuration)
: mRegionSamplingThread(samplingThread),
- mScheduler(scheduler),
- mTargetSamplingOffset(targetSamplingOffset) {}
+ mTargetSamplingWorkDuration(targetSamplingWorkDuration),
+ mVSyncSource(scheduler.makePrimaryDispSyncSource("SamplingThreadDispSyncListener", 0ns,
+ 0ns,
+ /*traceVsync=*/false)) {
+ mVSyncSource->setCallback(this);
+ }
~SamplingOffsetCallback() { stopVsyncListener(); }
@@ -114,8 +118,7 @@
if (mVsyncListening) return;
mPhaseIntervalSetting = Phase::ZERO;
- mScheduler.getPrimaryDispSync().addEventListener("SamplingThreadDispSyncListener", 0, this,
- mLastCallbackTime);
+ mVSyncSource->setVSyncEnabled(true);
mVsyncListening = true;
}
@@ -128,23 +131,24 @@
void stopVsyncListenerLocked() /*REQUIRES(mMutex)*/ {
if (!mVsyncListening) return;
- mScheduler.getPrimaryDispSync().removeEventListener(this, &mLastCallbackTime);
+ mVSyncSource->setVSyncEnabled(false);
mVsyncListening = false;
}
- void onDispSyncEvent(nsecs_t /*when*/, nsecs_t /*expectedVSyncTimestamp*/) final {
+ void onVSyncEvent(nsecs_t /*when*/, nsecs_t /*expectedVSyncTimestamp*/,
+ nsecs_t /*deadlineTimestamp*/) final {
std::unique_lock<decltype(mMutex)> lock(mMutex);
if (mPhaseIntervalSetting == Phase::ZERO) {
ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::waitForSamplePhase));
mPhaseIntervalSetting = Phase::SAMPLING;
- mScheduler.getPrimaryDispSync().changePhaseOffset(this, mTargetSamplingOffset.count());
+ mVSyncSource->setDuration(mTargetSamplingWorkDuration, 0ns);
return;
}
if (mPhaseIntervalSetting == Phase::SAMPLING) {
mPhaseIntervalSetting = Phase::ZERO;
- mScheduler.getPrimaryDispSync().changePhaseOffset(this, 0);
+ mVSyncSource->setDuration(0ns, 0ns);
stopVsyncListenerLocked();
lock.unlock();
mRegionSamplingThread.notifySamplingOffset();
@@ -153,16 +157,15 @@
}
RegionSamplingThread& mRegionSamplingThread;
- Scheduler& mScheduler;
- const std::chrono::nanoseconds mTargetSamplingOffset;
+ const std::chrono::nanoseconds mTargetSamplingWorkDuration;
mutable std::mutex mMutex;
- nsecs_t mLastCallbackTime = 0;
enum class Phase {
ZERO,
SAMPLING
} mPhaseIntervalSetting /*GUARDED_BY(mMutex) macro doesn't work with unique_lock?*/
= Phase::ZERO;
bool mVsyncListening /*GUARDED_BY(mMutex)*/ = false;
+ std::unique_ptr<VSyncSource> mVSyncSource;
};
RegionSamplingThread::RegionSamplingThread(SurfaceFlinger& flinger, Scheduler& scheduler,
@@ -170,11 +173,12 @@
: mFlinger(flinger),
mScheduler(scheduler),
mTunables(tunables),
- mIdleTimer(std::chrono::duration_cast<std::chrono::milliseconds>(
- mTunables.mSamplingTimerTimeout),
- [] {}, [this] { checkForStaleLuma(); }),
+ mIdleTimer(
+ std::chrono::duration_cast<std::chrono::milliseconds>(
+ mTunables.mSamplingTimerTimeout),
+ [] {}, [this] { checkForStaleLuma(); }),
mPhaseCallback(std::make_unique<SamplingOffsetCallback>(*this, mScheduler,
- tunables.mSamplingOffset)),
+ tunables.mSamplingDuration)),
lastSampleTime(0ns) {
mThread = std::thread([this]() { threadMain(); });
pthread_setname_np(mThread.native_handle(), "RegionSamplingThread");
@@ -183,7 +187,7 @@
RegionSamplingThread::RegionSamplingThread(SurfaceFlinger& flinger, Scheduler& scheduler)
: RegionSamplingThread(flinger, scheduler,
- TimingTunables{defaultRegionSamplingOffset,
+ TimingTunables{defaultRegionSamplingWorkDuration,
defaultRegionSamplingPeriod,
defaultRegionSamplingTimerTimeout}) {}
diff --git a/services/surfaceflinger/RegionSamplingThread.h b/services/surfaceflinger/RegionSamplingThread.h
index b9b7a3c..0defdb3 100644
--- a/services/surfaceflinger/RegionSamplingThread.h
+++ b/services/surfaceflinger/RegionSamplingThread.h
@@ -43,10 +43,10 @@
class RegionSamplingThread : public IBinder::DeathRecipient {
public:
struct TimingTunables {
- // debug.sf.sampling_offset_ns
- // When asynchronously collecting sample, the offset, from zero phase in the vsync timeline
- // at which the sampling should start.
- std::chrono::nanoseconds mSamplingOffset;
+ // debug.sf.region_sampling_duration_ns
+ // When asynchronously collecting a sample, the duration before a vsync at which
+ // sampling should start
+ std::chrono::nanoseconds mSamplingDuration;
// debug.sf.sampling_period_ns
// This is the maximum amount of time the luma receiving client
// should have to wait for a new luma value after a frame is updated. The inverse of this is
diff --git a/services/surfaceflinger/Scheduler/DispSync.h b/services/surfaceflinger/Scheduler/DispSync.h
index 7b2c9c3..f34aabc 100644
--- a/services/surfaceflinger/Scheduler/DispSync.h
+++ b/services/surfaceflinger/Scheduler/DispSync.h
@@ -32,32 +32,15 @@
class DispSync {
public:
- class Callback {
- public:
- Callback() = default;
- virtual ~Callback() = default;
- virtual void onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) = 0;
-
- protected:
- Callback(Callback const&) = delete;
- Callback& operator=(Callback const&) = delete;
- };
-
DispSync() = default;
virtual ~DispSync() = default;
- virtual void reset() = 0;
virtual bool addPresentFence(const std::shared_ptr<FenceTime>&) = 0;
virtual void beginResync() = 0;
virtual bool addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
bool* periodFlushed) = 0;
- virtual void endResync() = 0;
virtual void setPeriod(nsecs_t period) = 0;
virtual nsecs_t getPeriod() = 0;
- virtual status_t addEventListener(const char* name, nsecs_t phase, Callback* callback,
- nsecs_t lastCallbackTime) = 0;
- virtual status_t removeEventListener(Callback* callback, nsecs_t* outLastCallback) = 0;
- virtual status_t changePhaseOffset(Callback* callback, nsecs_t phase) = 0;
virtual nsecs_t computeNextRefresh(int periodOffset, nsecs_t now) const = 0;
virtual void setIgnorePresentFences(bool ignore) = 0;
virtual nsecs_t expectedPresentTime(nsecs_t now) = 0;
diff --git a/services/surfaceflinger/Scheduler/DispSyncSource.cpp b/services/surfaceflinger/Scheduler/DispSyncSource.cpp
index 8752b66..75f072d 100644
--- a/services/surfaceflinger/Scheduler/DispSyncSource.cpp
+++ b/services/surfaceflinger/Scheduler/DispSyncSource.cpp
@@ -14,10 +14,6 @@
* limitations under the License.
*/
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wconversion"
-
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
#include "DispSyncSource.h"
@@ -29,34 +25,129 @@
#include "DispSync.h"
#include "EventThread.h"
-namespace android {
+namespace android::scheduler {
using base::StringAppendF;
+using namespace std::chrono_literals;
-DispSyncSource::DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync,
+// The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
+// VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
+// for now.
+class CallbackRepeater {
+public:
+ CallbackRepeater(VSyncDispatch& dispatch, VSyncDispatch::Callback cb, const char* name,
+ std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration,
+ std::chrono::nanoseconds notBefore)
+ : mName(name),
+ mCallback(cb),
+ mRegistration(dispatch,
+ std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
+ std::placeholders::_2, std::placeholders::_3),
+ mName),
+ mStarted(false),
+ mWorkDuration(workDuration),
+ mReadyDuration(readyDuration),
+ mLastCallTime(notBefore) {}
+
+ ~CallbackRepeater() {
+ std::lock_guard lock(mMutex);
+ mRegistration.cancel();
+ }
+
+ void start(std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration) {
+ std::lock_guard lock(mMutex);
+ mStarted = true;
+ mWorkDuration = workDuration;
+ mReadyDuration = readyDuration;
+
+ auto const scheduleResult =
+ mRegistration.schedule({.workDuration = mWorkDuration.count(),
+ .readyDuration = mReadyDuration.count(),
+ .earliestVsync = mLastCallTime.count()});
+ LOG_ALWAYS_FATAL_IF((scheduleResult != scheduler::ScheduleResult::Scheduled),
+ "Error scheduling callback: rc %X", scheduleResult);
+ }
+
+ void stop() {
+ std::lock_guard lock(mMutex);
+ LOG_ALWAYS_FATAL_IF(!mStarted, "DispSyncInterface misuse: callback already stopped");
+ mStarted = false;
+ mRegistration.cancel();
+ }
+
+ void dump(std::string& result) const {
+ std::lock_guard lock(mMutex);
+ const auto relativeLastCallTime =
+ mLastCallTime - std::chrono::steady_clock::now().time_since_epoch();
+ StringAppendF(&result, "\t%s: ", mName.c_str());
+ StringAppendF(&result, "mWorkDuration=%.2f mReadyDuration=%.2f last vsync time ",
+ mWorkDuration.count() / 1e6f, mReadyDuration.count() / 1e6f);
+ StringAppendF(&result, "%.2fms relative to now (%s)\n", relativeLastCallTime.count() / 1e6f,
+ mStarted ? "running" : "stopped");
+ }
+
+private:
+ void callback(nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
+ {
+ std::lock_guard lock(mMutex);
+ mLastCallTime = std::chrono::nanoseconds(vsyncTime);
+ }
+
+ mCallback(vsyncTime, wakeupTime, readyTime);
+
+ {
+ std::lock_guard lock(mMutex);
+ if (!mStarted) {
+ return;
+ }
+ auto const scheduleResult =
+ mRegistration.schedule({.workDuration = mWorkDuration.count(),
+ .readyDuration = mReadyDuration.count(),
+ .earliestVsync = vsyncTime});
+ LOG_ALWAYS_FATAL_IF((scheduleResult != ScheduleResult::Scheduled),
+ "Error rescheduling callback: rc %X", scheduleResult);
+ }
+ }
+
+ const std::string mName;
+ scheduler::VSyncDispatch::Callback mCallback;
+
+ mutable std::mutex mMutex;
+ VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
+ bool mStarted GUARDED_BY(mMutex) = false;
+ std::chrono::nanoseconds mWorkDuration GUARDED_BY(mMutex) = 0ns;
+ std::chrono::nanoseconds mReadyDuration GUARDED_BY(mMutex) = 0ns;
+ std::chrono::nanoseconds mLastCallTime GUARDED_BY(mMutex) = 0ns;
+};
+
+DispSyncSource::DispSyncSource(scheduler::VSyncDispatch& vSyncDispatch,
+ std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration, bool traceVsync,
const char* name)
: mName(name),
mValue(base::StringPrintf("VSYNC-%s", name), 0),
mTraceVsync(traceVsync),
mVsyncOnLabel(base::StringPrintf("VsyncOn-%s", name)),
- mDispSync(dispSync),
- mPhaseOffset(base::StringPrintf("VsyncOffset-%s", name), phaseOffset) {}
+ mWorkDuration(base::StringPrintf("VsyncWorkDuration-%s", name), workDuration),
+ mReadyDuration(readyDuration) {
+ mCallbackRepeater =
+ std::make_unique<CallbackRepeater>(vSyncDispatch,
+ std::bind(&DispSyncSource::onVsyncCallback, this,
+ std::placeholders::_1,
+ std::placeholders::_2,
+ std::placeholders::_3),
+ name, workDuration, readyDuration,
+ std::chrono::steady_clock::now().time_since_epoch());
+}
+
+DispSyncSource::~DispSyncSource() = default;
void DispSyncSource::setVSyncEnabled(bool enable) {
std::lock_guard lock(mVsyncMutex);
if (enable) {
- status_t err = mDispSync->addEventListener(mName, mPhaseOffset,
- static_cast<DispSync::Callback*>(this),
- mLastCallbackTime);
- if (err != NO_ERROR) {
- ALOGE("error registering vsync callback: %s (%d)", strerror(-err), err);
- }
+ mCallbackRepeater->start(mWorkDuration, mReadyDuration);
// ATRACE_INT(mVsyncOnLabel.c_str(), 1);
} else {
- status_t err = mDispSync->removeEventListener(static_cast<DispSync::Callback*>(this),
- &mLastCallbackTime);
- if (err != NO_ERROR) {
- ALOGE("error unregistering vsync callback: %s (%d)", strerror(-err), err);
- }
+ mCallbackRepeater->stop();
// ATRACE_INT(mVsyncOnLabel.c_str(), 0);
}
mEnabled = enable;
@@ -67,32 +158,22 @@
mCallback = callback;
}
-void DispSyncSource::setPhaseOffset(nsecs_t phaseOffset) {
+void DispSyncSource::setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) {
std::lock_guard lock(mVsyncMutex);
- const nsecs_t period = mDispSync->getPeriod();
-
- // Normalize phaseOffset to [-period, period)
- const int numPeriods = phaseOffset / period;
- phaseOffset -= numPeriods * period;
- if (mPhaseOffset == phaseOffset) {
- return;
- }
-
- mPhaseOffset = phaseOffset;
+ mWorkDuration = workDuration;
+ mReadyDuration = readyDuration;
// If we're not enabled, we don't need to mess with the listeners
if (!mEnabled) {
return;
}
- status_t err =
- mDispSync->changePhaseOffset(static_cast<DispSync::Callback*>(this), mPhaseOffset);
- if (err != NO_ERROR) {
- ALOGE("error changing vsync offset: %s (%d)", strerror(-err), err);
- }
+ mCallbackRepeater->start(mWorkDuration, mReadyDuration);
}
-void DispSyncSource::onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) {
+void DispSyncSource::onVsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime,
+ nsecs_t readyTime) {
VSyncSource::Callback* callback;
{
std::lock_guard lock(mCallbackMutex);
@@ -104,17 +185,13 @@
}
if (callback != nullptr) {
- callback->onVSyncEvent(when, expectedVSyncTimestamp);
+ callback->onVSyncEvent(targetWakeupTime, vsyncTime, readyTime);
}
}
void DispSyncSource::dump(std::string& result) const {
std::lock_guard lock(mVsyncMutex);
StringAppendF(&result, "DispSyncSource: %s(%s)\n", mName, mEnabled ? "enabled" : "disabled");
- mDispSync->dump(result);
}
-} // namespace android
-
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic pop // ignored "-Wconversion"
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/DispSyncSource.h b/services/surfaceflinger/Scheduler/DispSyncSource.h
index 2aee3f6..2fce235 100644
--- a/services/surfaceflinger/Scheduler/DispSyncSource.h
+++ b/services/surfaceflinger/Scheduler/DispSyncSource.h
@@ -18,45 +18,47 @@
#include <mutex>
#include <string>
-#include "DispSync.h"
#include "EventThread.h"
#include "TracedOrdinal.h"
+#include "VSyncDispatch.h"
-namespace android {
+namespace android::scheduler {
+class CallbackRepeater;
-class DispSyncSource final : public VSyncSource, private DispSync::Callback {
+class DispSyncSource final : public VSyncSource {
public:
- DispSyncSource(DispSync* dispSync, nsecs_t phaseOffset, bool traceVsync, const char* name);
+ DispSyncSource(VSyncDispatch& vSyncDispatch, std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration, bool traceVsync, const char* name);
- ~DispSyncSource() override = default;
+ ~DispSyncSource() override;
// The following methods are implementation of VSyncSource.
const char* getName() const override { return mName; }
void setVSyncEnabled(bool enable) override;
void setCallback(VSyncSource::Callback* callback) override;
- void setPhaseOffset(nsecs_t phaseOffset) override;
+ void setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) override;
void dump(std::string&) const override;
private:
- // The following method is the implementation of the DispSync::Callback.
- void onDispSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) override;
+ void onVsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime);
const char* const mName;
TracedOrdinal<int> mValue;
const bool mTraceVsync;
const std::string mVsyncOnLabel;
- nsecs_t mLastCallbackTime GUARDED_BY(mVsyncMutex) = 0;
- DispSync* mDispSync;
+ std::unique_ptr<CallbackRepeater> mCallbackRepeater;
std::mutex mCallbackMutex;
VSyncSource::Callback* mCallback GUARDED_BY(mCallbackMutex) = nullptr;
mutable std::mutex mVsyncMutex;
- TracedOrdinal<nsecs_t> mPhaseOffset GUARDED_BY(mVsyncMutex);
+ TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mVsyncMutex);
+ std::chrono::nanoseconds mReadyDuration GUARDED_BY(mVsyncMutex);
bool mEnabled GUARDED_BY(mVsyncMutex) = false;
};
-} // namespace android
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 846190c..559626b 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -200,9 +200,10 @@
mThread.join();
}
-void EventThread::setPhaseOffset(nsecs_t phaseOffset) {
+void EventThread::setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) {
std::lock_guard<std::mutex> lock(mMutex);
- mVSyncSource->setPhaseOffset(phaseOffset);
+ mVSyncSource->setDuration(workDuration, readyDuration);
}
sp<EventThreadConnection> EventThread::createEventConnection(
@@ -283,7 +284,8 @@
mCondition.notify_all();
}
-void EventThread::onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp) {
+void EventThread::onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp,
+ nsecs_t /*deadlineTimestamp*/) {
std::lock_guard<std::mutex> lock(mMutex);
LOG_FATAL_IF(!mVSyncState);
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index 49f624c..fa1ca64 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -57,7 +57,8 @@
class Callback {
public:
virtual ~Callback() {}
- virtual void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) = 0;
+ virtual void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+ nsecs_t deadlineTimestamp) = 0;
};
virtual ~VSyncSource() {}
@@ -65,7 +66,8 @@
virtual const char* getName() const = 0;
virtual void setVSyncEnabled(bool enable) = 0;
virtual void setCallback(Callback* callback) = 0;
- virtual void setPhaseOffset(nsecs_t phaseOffset) = 0;
+ virtual void setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) = 0;
virtual void dump(std::string& result) const = 0;
};
@@ -116,7 +118,8 @@
virtual void dump(std::string& result) const = 0;
- virtual void setPhaseOffset(nsecs_t phaseOffset) = 0;
+ virtual void setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) = 0;
virtual status_t registerDisplayEventConnection(
const sp<EventThreadConnection>& connection) = 0;
@@ -157,7 +160,8 @@
void dump(std::string& result) const override;
- void setPhaseOffset(nsecs_t phaseOffset) override;
+ void setDuration(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) override;
size_t getEventThreadConnectionCount() override;
@@ -177,7 +181,8 @@
REQUIRES(mMutex);
// Implements VSyncSource::Callback
- void onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp) override;
+ void onVSyncEvent(nsecs_t timestamp, nsecs_t expectedVSyncTimestamp,
+ nsecs_t deadlineTimestamp) override;
const std::unique_ptr<VSyncSource> mVSyncSource GUARDED_BY(mMutex);
diff --git a/services/surfaceflinger/Scheduler/InjectVSyncSource.h b/services/surfaceflinger/Scheduler/InjectVSyncSource.h
index 975c9db..016b076 100644
--- a/services/surfaceflinger/Scheduler/InjectVSyncSource.h
+++ b/services/surfaceflinger/Scheduler/InjectVSyncSource.h
@@ -35,16 +35,17 @@
mCallback = callback;
}
- void onInjectSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) {
+ void onInjectSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+ nsecs_t deadlineTimestamp) {
std::lock_guard<std::mutex> lock(mCallbackMutex);
if (mCallback) {
- mCallback->onVSyncEvent(when, expectedVSyncTimestamp);
+ mCallback->onVSyncEvent(when, expectedVSyncTimestamp, deadlineTimestamp);
}
}
const char* getName() const override { return "inject"; }
void setVSyncEnabled(bool) override {}
- void setPhaseOffset(nsecs_t) override {}
+ void setDuration(std::chrono::nanoseconds, std::chrono::nanoseconds) override {}
void dump(std::string&) const override {}
private:
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index 5f7b2c2..017872c 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -184,16 +184,18 @@
return *mVsyncSchedule.sync;
}
-std::unique_ptr<VSyncSource> Scheduler::makePrimaryDispSyncSource(const char* name,
- nsecs_t phaseOffsetNs) {
- return std::make_unique<DispSyncSource>(&getPrimaryDispSync(), phaseOffsetNs,
- true /* traceVsync */, name);
+std::unique_ptr<VSyncSource> Scheduler::makePrimaryDispSyncSource(
+ const char* name, std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration, bool traceVsync) {
+ return std::make_unique<scheduler::DispSyncSource>(*mVsyncSchedule.dispatch, workDuration,
+ readyDuration, traceVsync, name);
}
Scheduler::ConnectionHandle Scheduler::createConnection(
- const char* connectionName, nsecs_t phaseOffsetNs,
+ const char* connectionName, std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration,
impl::EventThread::InterceptVSyncsCallback interceptCallback) {
- auto vsyncSource = makePrimaryDispSyncSource(connectionName, phaseOffsetNs);
+ auto vsyncSource = makePrimaryDispSyncSource(connectionName, workDuration, readyDuration);
auto eventThread = std::make_unique<impl::EventThread>(std::move(vsyncSource),
std::move(interceptCallback));
return createConnection(std::move(eventThread));
@@ -286,9 +288,10 @@
mConnections.at(handle).thread->dump(result);
}
-void Scheduler::setPhaseOffset(ConnectionHandle handle, nsecs_t phaseOffset) {
+void Scheduler::setDuration(ConnectionHandle handle, std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration) {
RETURN_IF_INVALID_HANDLE(handle);
- mConnections[handle].thread->setPhaseOffset(phaseOffset);
+ mConnections[handle].thread->setDuration(workDuration, readyDuration);
}
void Scheduler::getDisplayStatInfo(DisplayStatInfo* stats) {
@@ -318,12 +321,12 @@
return mInjectorConnectionHandle;
}
-bool Scheduler::injectVSync(nsecs_t when, nsecs_t expectedVSyncTime) {
+bool Scheduler::injectVSync(nsecs_t when, nsecs_t expectedVSyncTime, nsecs_t deadlineTimestamp) {
if (!mInjectVSyncs || !mVSyncInjector) {
return false;
}
- mVSyncInjector->onInjectSyncEvent(when, expectedVSyncTime);
+ mVSyncInjector->onInjectSyncEvent(when, expectedVSyncTime, deadlineTimestamp);
return true;
}
@@ -340,7 +343,6 @@
std::lock_guard<std::mutex> lock(mHWVsyncLock);
if (mPrimaryHWVsyncEnabled) {
mSchedulerCallback.setVsyncEnabled(false);
- getPrimaryDispSync().endResync();
mPrimaryHWVsyncEnabled = false;
}
if (makeUnavailable) {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index ed68b86..44d0d77 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -71,7 +71,9 @@
DispSync& getPrimaryDispSync();
using ConnectionHandle = scheduler::ConnectionHandle;
- ConnectionHandle createConnection(const char* connectionName, nsecs_t phaseOffsetNs,
+ ConnectionHandle createConnection(const char* connectionName,
+ std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration,
impl::EventThread::InterceptVSyncsCallback);
sp<IDisplayEventConnection> createDisplayEventConnection(ConnectionHandle,
@@ -88,17 +90,16 @@
void onScreenAcquired(ConnectionHandle);
void onScreenReleased(ConnectionHandle);
- // Modifies phase offset in the event thread.
- void setPhaseOffset(ConnectionHandle, nsecs_t phaseOffset);
+ // Modifies work duration in the event thread.
+ void setDuration(ConnectionHandle, std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration);
void getDisplayStatInfo(DisplayStatInfo* stats);
// Returns injector handle if injection has toggled, or an invalid handle otherwise.
ConnectionHandle enableVSyncInjection(bool enable);
-
// Returns false if injection is disabled.
- bool injectVSync(nsecs_t when, nsecs_t expectedVSyncTime);
-
+ bool injectVSync(nsecs_t when, nsecs_t expectedVSyncTime, nsecs_t deadlineTimestamp);
void enableHardwareVsync();
void disableHardwareVsync(bool makeUnavailable);
@@ -151,6 +152,11 @@
size_t getEventThreadConnectionCount(ConnectionHandle handle);
+ std::unique_ptr<VSyncSource> makePrimaryDispSyncSource(const char* name,
+ std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration,
+ bool traceVsync = true);
+
private:
friend class TestableScheduler;
@@ -186,8 +192,6 @@
static std::unique_ptr<LayerHistory> createLayerHistory(const scheduler::RefreshRateConfigs&,
bool useContentDetectionV2);
- std::unique_ptr<VSyncSource> makePrimaryDispSyncSource(const char* name, nsecs_t phaseOffsetNs);
-
// Create a connection on the given EventThread.
ConnectionHandle createConnection(std::unique_ptr<EventThread>);
sp<EventThreadConnection> createConnectionInternal(EventThread*,
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatch.h b/services/surfaceflinger/Scheduler/VSyncDispatch.h
index 2a2d7c5..0407900 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatch.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatch.h
@@ -40,11 +40,13 @@
/*
* A callback that can be registered to be awoken at a given time relative to a vsync event.
- * \param [in] vsyncTime The timestamp of the vsync the callback is for.
- * \param [in] targetWakeupTime The timestamp of intended wakeup time of the cb.
- *
+ * \param [in] vsyncTime: The timestamp of the vsync the callback is for.
+ * \param [in] targetWakeupTime: The intended wakeup time of the callback.
+ * \param [in] readyTime: The timestamp by which the client needs to finish
+ * its work.
*/
- using Callback = std::function<void(nsecs_t vsyncTime, nsecs_t targetWakeupTime)>;
+ using Callback =
+ std::function<void(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime)>;
/*
* Registers a callback that will be called at designated points on the vsync timeline.
@@ -71,33 +73,54 @@
virtual void unregisterCallback(CallbackToken token) = 0;
/*
+ * Timing information about a scheduled callback
+ *
+ * @workDuration: The time needed for the client to perform its work
+ * @readyDuration: The time needed for the client to be ready before a vsync event.
+ * For external (non-SF) clients, not only do we need to account for their
+ * workDuration, but we also need to account for the time SF will take to
+ * process their buffer/transaction. In this case, readyDuration will be set
+ * to the SF duration in order to provide enough end-to-end time, and to be
+ * able to provide the ready-by time (deadline) on the callback.
+ * For internal clients, we don't need to add additional padding, so
+ * readyDuration will typically be 0.
+ * @earliestVsync: The targeted display time. This will be snapped to the closest
+ * predicted vsync time after earliestVsync.
+ *
+ * callback will be dispatched at 'workDuration + readyDuration' nanoseconds before a vsync
+ * event.
+ */
+ struct ScheduleTiming {
+ nsecs_t workDuration = 0;
+ nsecs_t readyDuration = 0;
+ nsecs_t earliestVsync = 0;
+ };
+
+ /*
* Schedules the registered callback to be dispatched.
*
- * The callback will be dispatched at 'workDuration' nanoseconds before a vsync event.
+ * The callback will be dispatched at 'workDuration + readyDuration' nanoseconds before a vsync
+ * event.
*
* The caller designates the earliest vsync event that should be targeted by the earliestVsync
* parameter.
- * The callback will be scheduled at (workDuration - predictedVsync), where predictedVsync
- * is the first vsync event time where ( predictedVsync >= earliestVsync ).
+ * The callback will be scheduled at (workDuration + readyDuration - predictedVsync), where
+ * predictedVsync is the first vsync event time where ( predictedVsync >= earliestVsync ).
*
- * If (workDuration - earliestVsync) is in the past, or if a callback has already been
- * dispatched for the predictedVsync, an error will be returned.
+ * If (workDuration + readyDuration - earliestVsync) is in the past, or if a callback has
+ * already been dispatched for the predictedVsync, an error will be returned.
*
* It is valid to reschedule a callback to a different time.
*
* \param [in] token The callback to schedule.
- * \param [in] workDuration The time before the actual vsync time to invoke the callback
- * associated with token.
- * \param [in] earliestVsync The targeted display time. This will be snapped to the closest
- * predicted vsync time after earliestVsync.
+ * \param [in] scheduleTiming The timing information for this schedule call
* \return A ScheduleResult::Scheduled if callback was scheduled.
* A ScheduleResult::CannotSchedule
- * if (workDuration - earliestVsync) is in the past, or
- * if a callback was dispatched for the predictedVsync already.
- * A ScheduleResult::Error if there was another error.
+ * if (workDuration + readyDuration - earliestVsync) is in the past,
+ * or if a callback was dispatched for the predictedVsync already. A ScheduleResult::Error if
+ * there was another error.
*/
- virtual ScheduleResult schedule(CallbackToken token, nsecs_t workDuration,
- nsecs_t earliestVsync) = 0;
+ virtual ScheduleResult schedule(CallbackToken token, ScheduleTiming scheduleTiming) = 0;
/* Cancels a scheduled callback, if possible.
*
@@ -129,7 +152,7 @@
~VSyncCallbackRegistration();
// See documentation for VSyncDispatch::schedule.
- ScheduleResult schedule(nsecs_t workDuration, nsecs_t earliestVsync);
+ ScheduleResult schedule(VSyncDispatch::ScheduleTiming scheduleTiming);
// See documentation for VSyncDispatch::cancel.
CancelResult cancel();
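To make the new schedule() contract above concrete, a hedged usage sketch (the callback body, registration name, and duration values are placeholders; only the types and signatures are taken from this header):

    #include <utils/Timers.h>  // nsecs_t
    #include "VSyncDispatch.h"

    using namespace android::scheduler;

    void scheduleOnce(VSyncDispatch& dispatch, nsecs_t now) {
        // The callback now receives the targeted vsync, the wakeup time, and the
        // ready-by deadline added by this change.
        VSyncCallbackRegistration registration(
                dispatch,
                [](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
                    // Do the frame work here; it must be complete by readyTime,
                    // and vsyncTime is the display time the work targets.
                },
                "ExampleCallback");

        // Wake up workDuration + readyDuration before the first predicted vsync at or
        // after 'now'.
        const VSyncDispatch::ScheduleTiming timing{.workDuration = 6'000'000,   // placeholder: 6ms
                                                   .readyDuration = 4'000'000,  // placeholder: 4ms
                                                   .earliestVsync = now};
        if (registration.schedule(timing) != ScheduleResult::Scheduled) {
            // CannotSchedule or Error: the target is already in the past or taken.
        }
    }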
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index ef92680..2154a40 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -35,8 +35,6 @@
nsecs_t minVsyncDistance)
: mName(name),
mCallback(cb),
- mWorkDuration(0),
- mEarliestVsync(0),
mMinVsyncDistance(minVsyncDistance) {}
std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::lastExecutedVsyncTarget() const {
@@ -54,6 +52,13 @@
return {mArmedInfo->mActualWakeupTime};
}
+std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::readyTime() const {
+ if (!mArmedInfo) {
+ return {};
+ }
+ return {mArmedInfo->mActualReadyTime};
+}
+
std::optional<nsecs_t> VSyncDispatchTimerQueueEntry::targetVsync() const {
if (!mArmedInfo) {
return {};
@@ -61,10 +66,10 @@
return {mArmedInfo->mActualVsyncTime};
}
-ScheduleResult VSyncDispatchTimerQueueEntry::schedule(nsecs_t workDuration, nsecs_t earliestVsync,
+ScheduleResult VSyncDispatchTimerQueueEntry::schedule(VSyncDispatch::ScheduleTiming timing,
VSyncTracker& tracker, nsecs_t now) {
- auto nextVsyncTime =
- tracker.nextAnticipatedVSyncTimeFrom(std::max(earliestVsync, now + workDuration));
+ auto nextVsyncTime = tracker.nextAnticipatedVSyncTimeFrom(
+ std::max(timing.earliestVsync, now + timing.workDuration + timing.readyDuration));
bool const wouldSkipAVsyncTarget =
mArmedInfo && (nextVsyncTime > (mArmedInfo->mActualVsyncTime + mMinVsyncDistance));
@@ -80,16 +85,15 @@
tracker.nextAnticipatedVSyncTimeFrom(*mLastDispatchTime + mMinVsyncDistance);
}
- auto const nextWakeupTime = nextVsyncTime - workDuration;
- mWorkDuration = workDuration;
- mEarliestVsync = earliestVsync;
- mArmedInfo = {nextWakeupTime, nextVsyncTime};
+ auto const nextWakeupTime = nextVsyncTime - timing.workDuration - timing.readyDuration;
+ auto const nextReadyTime = nextVsyncTime - timing.readyDuration;
+ mScheduleTiming = timing;
+ mArmedInfo = {nextWakeupTime, nextVsyncTime, nextReadyTime};
return ScheduleResult::Scheduled;
}
-void VSyncDispatchTimerQueueEntry::addPendingWorkloadUpdate(nsecs_t workDuration,
- nsecs_t earliestVsync) {
- mWorkloadUpdateInfo = {.earliestVsync = earliestVsync, .duration = workDuration};
+void VSyncDispatchTimerQueueEntry::addPendingWorkloadUpdate(VSyncDispatch::ScheduleTiming timing) {
+ mWorkloadUpdateInfo = timing;
}
bool VSyncDispatchTimerQueueEntry::hasPendingWorkloadUpdate() const {
@@ -102,14 +106,18 @@
}
if (mWorkloadUpdateInfo) {
- mEarliestVsync = mWorkloadUpdateInfo->earliestVsync;
- mWorkDuration = mWorkloadUpdateInfo->duration;
+ mScheduleTiming = *mWorkloadUpdateInfo;
mWorkloadUpdateInfo.reset();
}
- auto const nextVsyncTime =
- tracker.nextAnticipatedVSyncTimeFrom(std::max(mEarliestVsync, now + mWorkDuration));
- mArmedInfo = {nextVsyncTime - mWorkDuration, nextVsyncTime};
+ const auto earliestReadyBy = now + mScheduleTiming.workDuration + mScheduleTiming.readyDuration;
+ const auto earliestVsync = std::max(earliestReadyBy, mScheduleTiming.earliestVsync);
+
+ const auto nextVsyncTime = tracker.nextAnticipatedVSyncTimeFrom(earliestVsync);
+ const auto nextReadyTime = nextVsyncTime - mScheduleTiming.readyDuration;
+ const auto nextWakeupTime = nextReadyTime - mScheduleTiming.workDuration;
+
+ mArmedInfo = {nextWakeupTime, nextVsyncTime, nextReadyTime};
}
void VSyncDispatchTimerQueueEntry::disarm() {
@@ -122,13 +130,14 @@
return *mLastDispatchTime;
}
-void VSyncDispatchTimerQueueEntry::callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp) {
+void VSyncDispatchTimerQueueEntry::callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp,
+ nsecs_t deadlineTimestamp) {
{
std::lock_guard<std::mutex> lk(mRunningMutex);
mRunning = true;
}
- mCallback(vsyncTimestamp, wakeupTimestamp);
+ mCallback(vsyncTimestamp, wakeupTimestamp, deadlineTimestamp);
std::lock_guard<std::mutex> lk(mRunningMutex);
mRunning = false;
@@ -144,15 +153,20 @@
std::lock_guard<std::mutex> lk(mRunningMutex);
std::string armedInfo;
if (mArmedInfo) {
- StringAppendF(&armedInfo, "[wake up in %.2fms for vsync %.2fms from now]",
+ StringAppendF(&armedInfo,
+ "[wake up in %.2fms deadline in %.2fms for vsync %.2fms from now]",
(mArmedInfo->mActualWakeupTime - systemTime()) / 1e6f,
+ (mArmedInfo->mActualReadyTime - systemTime()) / 1e6f,
(mArmedInfo->mActualVsyncTime - systemTime()) / 1e6f);
}
StringAppendF(&result, "\t\t%s: %s %s\n", mName.c_str(),
mRunning ? "(in callback function)" : "", armedInfo.c_str());
- StringAppendF(&result, "\t\t\tmWorkDuration: %.2fms mEarliestVsync: %.2fms relative to now\n",
- mWorkDuration / 1e6f, (mEarliestVsync - systemTime()) / 1e6f);
+ StringAppendF(&result,
+ "\t\t\tworkDuration: %.2fms readyDuration: %.2fms earliestVsync: %.2fms relative "
+ "to now\n",
+ mScheduleTiming.workDuration / 1e6f, mScheduleTiming.readyDuration / 1e6f,
+ (mScheduleTiming.earliestVsync - systemTime()) / 1e6f);
if (mLastDispatchTime) {
StringAppendF(&result, "\t\t\tmLastDispatchTime: %.2fms ago\n",
@@ -239,6 +253,7 @@
std::shared_ptr<VSyncDispatchTimerQueueEntry> callback;
nsecs_t vsyncTimestamp;
nsecs_t wakeupTimestamp;
+ nsecs_t deadlineTimestamp;
};
std::vector<Invocation> invocations;
{
@@ -252,11 +267,13 @@
continue;
}
+ auto const readyTime = callback->readyTime();
+
auto const lagAllowance = std::max(now - mIntendedWakeupTime, static_cast<nsecs_t>(0));
if (*wakeupTime < mIntendedWakeupTime + mTimerSlack + lagAllowance) {
callback->executing();
- invocations.emplace_back(
- Invocation{callback, *callback->lastExecutedVsyncTarget(), *wakeupTime});
+ invocations.emplace_back(Invocation{callback, *callback->lastExecutedVsyncTarget(),
+ *wakeupTime, *readyTime});
}
}
@@ -265,7 +282,8 @@
}
for (auto const& invocation : invocations) {
- invocation.callback->callback(invocation.vsyncTimestamp, invocation.wakeupTimestamp);
+ invocation.callback->callback(invocation.vsyncTimestamp, invocation.wakeupTimestamp,
+ invocation.deadlineTimestamp);
}
}
@@ -297,8 +315,8 @@
}
}
-ScheduleResult VSyncDispatchTimerQueue::schedule(CallbackToken token, nsecs_t workDuration,
- nsecs_t earliestVsync) {
+ScheduleResult VSyncDispatchTimerQueue::schedule(CallbackToken token,
+ ScheduleTiming scheduleTiming) {
auto result = ScheduleResult::Error;
{
std::lock_guard<decltype(mMutex)> lk(mMutex);
@@ -314,11 +332,11 @@
* timer recalculation to avoid cancelling a callback that is about to fire. */
auto const rearmImminent = now > mIntendedWakeupTime;
if (CC_UNLIKELY(rearmImminent)) {
- callback->addPendingWorkloadUpdate(workDuration, earliestVsync);
+ callback->addPendingWorkloadUpdate(scheduleTiming);
return ScheduleResult::Scheduled;
}
- result = callback->schedule(workDuration, earliestVsync, mTracker, now);
+ result = callback->schedule(scheduleTiming, mTracker, now);
if (result == ScheduleResult::CannotSchedule) {
return result;
}
@@ -396,11 +414,11 @@
if (mValidToken) mDispatch.get().unregisterCallback(mToken);
}
-ScheduleResult VSyncCallbackRegistration::schedule(nsecs_t workDuration, nsecs_t earliestVsync) {
+ScheduleResult VSyncCallbackRegistration::schedule(VSyncDispatch::ScheduleTiming scheduleTiming) {
if (!mValidToken) {
return ScheduleResult::Error;
}
- return mDispatch.get().schedule(mToken, workDuration, earliestVsync);
+ return mDispatch.get().schedule(mToken, scheduleTiming);
}
CancelResult VSyncCallbackRegistration::cancel() {
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
index 957c0d1..26237b6 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
@@ -47,7 +47,7 @@
std::optional<nsecs_t> lastExecutedVsyncTarget() const;
// This moves the state from disarmed->armed and will calculate the wakeupTime.
- ScheduleResult schedule(nsecs_t workDuration, nsecs_t earliestVsync, VSyncTracker& tracker,
+ ScheduleResult schedule(VSyncDispatch::ScheduleTiming timing, VSyncTracker& tracker,
nsecs_t now);
// This will update armed entries with the latest vsync information. Entry remains armed.
void update(VSyncTracker& tracker, nsecs_t now);
@@ -56,6 +56,8 @@
// It will not update the wakeupTime.
std::optional<nsecs_t> wakeupTime() const;
+ std::optional<nsecs_t> readyTime() const;
+
std::optional<nsecs_t> targetVsync() const;
// This moves state from armed->disarmed.
@@ -67,14 +69,14 @@
// Adds a pending upload of the earliestVSync and workDuration that will be applied on the next
// call to update()
- void addPendingWorkloadUpdate(nsecs_t workDuration, nsecs_t earliestVsync);
+ void addPendingWorkloadUpdate(VSyncDispatch::ScheduleTiming);
// Checks if there is a pending update to the workload, returning true if so.
bool hasPendingWorkloadUpdate() const;
// End: functions that are not threadsafe.
// Invoke the callback with the two given timestamps, moving the state from running->disarmed.
- void callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp);
+ void callback(nsecs_t vsyncTimestamp, nsecs_t wakeupTimestamp, nsecs_t deadlineTimestamp);
// Block calling thread while the callback is executing.
void ensureNotRunning();
@@ -84,22 +86,18 @@
std::string const mName;
VSyncDispatch::Callback const mCallback;
- nsecs_t mWorkDuration;
- nsecs_t mEarliestVsync;
+ VSyncDispatch::ScheduleTiming mScheduleTiming;
nsecs_t const mMinVsyncDistance;
struct ArmingInfo {
nsecs_t mActualWakeupTime;
nsecs_t mActualVsyncTime;
+ nsecs_t mActualReadyTime;
};
std::optional<ArmingInfo> mArmedInfo;
std::optional<nsecs_t> mLastDispatchTime;
- struct WorkloadUpdateInfo {
- nsecs_t duration;
- nsecs_t earliestVsync;
- };
- std::optional<WorkloadUpdateInfo> mWorkloadUpdateInfo;
+ std::optional<VSyncDispatch::ScheduleTiming> mWorkloadUpdateInfo;
mutable std::mutex mRunningMutex;
std::condition_variable mCv;
@@ -125,7 +123,7 @@
CallbackToken registerCallback(Callback const& callbackFn, std::string callbackName) final;
void unregisterCallback(CallbackToken token) final;
- ScheduleResult schedule(CallbackToken token, nsecs_t workDuration, nsecs_t earliestVsync) final;
+ ScheduleResult schedule(CallbackToken token, ScheduleTiming scheduleTiming) final;
CancelResult cancel(CallbackToken token) final;
void dump(std::string& result) const final;
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index 61f3fbb..e90edf7 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -14,10 +14,6 @@
* limitations under the License.
*/
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wconversion"
-
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0
#include "VSyncPredictor.h"
@@ -54,7 +50,7 @@
}
}
-inline size_t VSyncPredictor::next(int i) const {
+inline size_t VSyncPredictor::next(size_t i) const {
return (i + 1) % mTimestamps.size();
}
@@ -69,12 +65,12 @@
}
nsecs_t VSyncPredictor::currentPeriod() const {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
return std::get<0>(mRateMap.find(mIdealPeriod)->second);
}
bool VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
if (!validate(timestamp)) {
// VSR could elect to ignore the incongruent timestamp or resetModel(). If ts is ignored,
@@ -138,14 +134,14 @@
auto meanTS = scheduler::calculate_mean(vsyncTS);
auto meanOrdinal = scheduler::calculate_mean(ordinals);
- for (auto i = 0; i < vsyncTS.size(); i++) {
+ for (size_t i = 0; i < vsyncTS.size(); i++) {
vsyncTS[i] -= meanTS;
ordinals[i] -= meanOrdinal;
}
auto top = 0ll;
auto bottom = 0ll;
- for (auto i = 0; i < vsyncTS.size(); i++) {
+ for (size_t i = 0; i < vsyncTS.size(); i++) {
top += vsyncTS[i] * ordinals[i];
bottom += ordinals[i] * ordinals[i];
}
@@ -177,9 +173,9 @@
}
nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
- auto const [slope, intercept] = getVSyncPredictionModel(lk);
+ auto const [slope, intercept] = getVSyncPredictionModel(lock);
if (mTimestamps.empty()) {
traceInt64If("VSP-mode", 1);
@@ -215,8 +211,8 @@
}
std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel() const {
- std::lock_guard<std::mutex> lk(mMutex);
- return VSyncPredictor::getVSyncPredictionModel(lk);
+ std::lock_guard lock(mMutex);
+ return VSyncPredictor::getVSyncPredictionModel(lock);
}
std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel(
@@ -227,7 +223,7 @@
void VSyncPredictor::setPeriod(nsecs_t period) {
ATRACE_CALL();
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
static constexpr size_t kSizeLimit = 30;
if (CC_UNLIKELY(mRateMap.size() == kSizeLimit)) {
mRateMap.erase(mRateMap.begin());
@@ -256,18 +252,18 @@
}
bool VSyncPredictor::needsMoreSamples() const {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
return mTimestamps.size() < kMinimumSamplesForPrediction;
}
void VSyncPredictor::resetModel() {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
mRateMap[mIdealPeriod] = {mIdealPeriod, 0};
clearTimestamps();
}
void VSyncPredictor::dump(std::string& result) const {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
StringAppendF(&result, "\tmIdealPeriod=%.2f\n", mIdealPeriod / 1e6f);
StringAppendF(&result, "\tRefresh Rate Map:\n");
for (const auto& [idealPeriod, periodInterceptTuple] : mRateMap) {
@@ -280,5 +276,3 @@
} // namespace android::scheduler
-// TODO(b/129481165): remove the #pragma below and fix conversion issues
-#pragma clang diagnostic pop // ignored "-Wconversion"
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 5f3c418..5f2ec49 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -74,7 +74,7 @@
size_t const kOutlierTolerancePercent;
std::mutex mutable mMutex;
- size_t next(int i) const REQUIRES(mMutex);
+ size_t next(size_t i) const REQUIRES(mMutex);
bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
std::tuple<nsecs_t, nsecs_t> getVSyncPredictionModel(std::lock_guard<std::mutex> const&) const
REQUIRES(mMutex);
@@ -84,7 +84,7 @@
std::unordered_map<nsecs_t, std::tuple<nsecs_t, nsecs_t>> mutable mRateMap GUARDED_BY(mMutex);
- int mLastTimestampIndex GUARDED_BY(mMutex) = 0;
+ size_t mLastTimestampIndex GUARDED_BY(mMutex) = 0;
std::vector<nsecs_t> mTimestamps GUARDED_BY(mMutex);
};
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index a2b279b..792313e 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -38,20 +38,20 @@
class PredictedVsyncTracer {
public:
PredictedVsyncTracer(VSyncDispatch& dispatch)
- : mRegistration(dispatch,
- std::bind(&PredictedVsyncTracer::callback, this, std::placeholders::_1,
- std::placeholders::_2),
+ : mRegistration(dispatch, std::bind(&PredictedVsyncTracer::callback, this),
"PredictedVsyncTracer") {
- mRegistration.schedule(0, 0);
+ scheduleRegistration();
}
private:
TracedOrdinal<bool> mParity = {"VSYNC-predicted", 0};
VSyncCallbackRegistration mRegistration;
- void callback(nsecs_t /*vsyncTime*/, nsecs_t /*targetWakeupTim*/) {
+ void scheduleRegistration() { mRegistration.schedule({0, 0, 0}); }
+
+ void callback() {
mParity = !mParity;
- mRegistration.schedule(0, 0);
+ scheduleRegistration();
}
};
@@ -69,96 +69,6 @@
VSyncReactor::~VSyncReactor() = default;
-// The DispSync interface has a 'repeat this callback at rate' semantic. This object adapts
-// VSyncDispatch's individually-scheduled callbacks so as to meet DispSync's existing semantic
-// for now.
-class CallbackRepeater {
-public:
- CallbackRepeater(VSyncDispatch& dispatch, DispSync::Callback* cb, const char* name,
- nsecs_t period, nsecs_t offset, nsecs_t notBefore)
- : mName(name),
- mCallback(cb),
- mRegistration(dispatch,
- std::bind(&CallbackRepeater::callback, this, std::placeholders::_1,
- std::placeholders::_2),
- mName),
- mPeriod(period),
- mOffset(offset),
- mLastCallTime(notBefore) {}
-
- ~CallbackRepeater() {
- std::lock_guard<std::mutex> lk(mMutex);
- mRegistration.cancel();
- }
-
- void start(nsecs_t offset) {
- std::lock_guard<std::mutex> lk(mMutex);
- mStopped = false;
- mOffset = offset;
-
- auto const schedule_result = mRegistration.schedule(calculateWorkload(), mLastCallTime);
- LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
- "Error scheduling callback: rc %X", schedule_result);
- }
-
- void setPeriod(nsecs_t period) {
- std::lock_guard<std::mutex> lk(mMutex);
- if (period == mPeriod) {
- return;
- }
- mPeriod = period;
- }
-
- void stop() {
- std::lock_guard<std::mutex> lk(mMutex);
- LOG_ALWAYS_FATAL_IF(mStopped, "DispSyncInterface misuse: callback already stopped");
- mStopped = true;
- mRegistration.cancel();
- }
-
- void dump(std::string& result) const {
- std::lock_guard<std::mutex> lk(mMutex);
- StringAppendF(&result, "\t%s: mPeriod=%.2f last vsync time %.2fms relative to now (%s)\n",
- mName.c_str(), mPeriod / 1e6f, (mLastCallTime - systemTime()) / 1e6f,
- mStopped ? "stopped" : "running");
- }
-
-private:
- void callback(nsecs_t vsynctime, nsecs_t wakeupTime) {
- {
- std::lock_guard<std::mutex> lk(mMutex);
- mLastCallTime = vsynctime;
- }
-
- mCallback->onDispSyncEvent(wakeupTime, vsynctime);
-
- {
- std::lock_guard<std::mutex> lk(mMutex);
- if (mStopped) {
- return;
- }
- auto const schedule_result = mRegistration.schedule(calculateWorkload(), vsynctime);
- LOG_ALWAYS_FATAL_IF((schedule_result != ScheduleResult::Scheduled),
- "Error rescheduling callback: rc %X", schedule_result);
- }
- }
-
- // DispSync offsets are defined as time after the vsync before presentation.
- // VSyncReactor workloads are defined as time before the intended presentation vsync.
- // Note change in sign between the two defnitions.
- nsecs_t calculateWorkload() REQUIRES(mMutex) { return mPeriod - mOffset; }
-
- const std::string mName;
- DispSync::Callback* const mCallback;
-
- std::mutex mutable mMutex;
- VSyncCallbackRegistration mRegistration GUARDED_BY(mMutex);
- bool mStopped GUARDED_BY(mMutex) = false;
- nsecs_t mPeriod GUARDED_BY(mMutex);
- nsecs_t mOffset GUARDED_BY(mMutex);
- nsecs_t mLastCallTime GUARDED_BY(mMutex);
-};
-
bool VSyncReactor::addPresentFence(const std::shared_ptr<FenceTime>& fence) {
if (!fence) {
return false;
@@ -169,7 +79,7 @@
return true;
}
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
if (mExternalIgnoreFences || mInternalIgnoreFences) {
return true;
}
@@ -207,7 +117,7 @@
}
void VSyncReactor::setIgnorePresentFences(bool ignoration) {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
mExternalIgnoreFences = ignoration;
updateIgnorePresentFencesInternal();
}
@@ -269,8 +179,6 @@
mTracker.resetModel();
}
-void VSyncReactor::endResync() {}
-
bool VSyncReactor::periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> HwcVsyncPeriod) {
if (!mPeriodConfirmationInProgress) {
return false;
@@ -303,14 +211,11 @@
bool* periodFlushed) {
assert(periodFlushed);
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
ATRACE_NAME("VSR: period confirmed");
if (mPeriodTransitioningTo) {
mTracker.setPeriod(*mPeriodTransitioningTo);
- for (auto& entry : mCallbacks) {
- entry.second->setPeriod(*mPeriodTransitioningTo);
- }
*periodFlushed = true;
}
@@ -339,51 +244,8 @@
return mMoreSamplesNeeded;
}
-status_t VSyncReactor::addEventListener(const char* name, nsecs_t phase,
- DispSync::Callback* callback,
- nsecs_t /* lastCallbackTime */) {
- std::lock_guard<std::mutex> lk(mMutex);
- auto it = mCallbacks.find(callback);
- if (it == mCallbacks.end()) {
- // TODO (b/146557561): resolve lastCallbackTime semantics in DispSync i/f.
- static auto constexpr maxListeners = 4;
- if (mCallbacks.size() >= maxListeners) {
- ALOGE("callback %s not added, exceeded callback limit of %i (currently %zu)", name,
- maxListeners, mCallbacks.size());
- return NO_MEMORY;
- }
-
- auto const period = mTracker.currentPeriod();
- auto repeater = std::make_unique<CallbackRepeater>(mDispatch, callback, name, period, phase,
- mClock->now());
- it = mCallbacks.emplace(std::pair(callback, std::move(repeater))).first;
- }
-
- it->second->start(phase);
- return NO_ERROR;
-}
-
-status_t VSyncReactor::removeEventListener(DispSync::Callback* callback,
- nsecs_t* /* outLastCallback */) {
- std::lock_guard<std::mutex> lk(mMutex);
- auto const it = mCallbacks.find(callback);
- LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback %p not registered", callback);
-
- it->second->stop();
- return NO_ERROR;
-}
-
-status_t VSyncReactor::changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) {
- std::lock_guard<std::mutex> lk(mMutex);
- auto const it = mCallbacks.find(callback);
- LOG_ALWAYS_FATAL_IF(it == mCallbacks.end(), "callback was %p not registered", callback);
-
- it->second->start(phase);
- return NO_ERROR;
-}
-
void VSyncReactor::dump(std::string& result) const {
- std::lock_guard<std::mutex> lk(mMutex);
+ std::lock_guard lock(mMutex);
StringAppendF(&result, "VsyncReactor in use\n");
StringAppendF(&result, "Has %zu unfired fences\n", mUnfiredFences.size());
StringAppendF(&result, "mInternalIgnoreFences=%d mExternalIgnoreFences=%d\n",
@@ -403,17 +265,10 @@
StringAppendF(&result, "No Last HW vsync\n");
}
- StringAppendF(&result, "CallbackRepeaters:\n");
- for (const auto& [callback, repeater] : mCallbacks) {
- repeater->dump(result);
- }
-
StringAppendF(&result, "VSyncTracker:\n");
mTracker.dump(result);
StringAppendF(&result, "VSyncDispatch:\n");
mDispatch.dump(result);
}
-void VSyncReactor::reset() {}
-
} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.h b/services/surfaceflinger/Scheduler/VSyncReactor.h
index 22ceb39..c6f2777 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.h
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.h
@@ -29,7 +29,6 @@
class Clock;
class VSyncDispatch;
class VSyncTracker;
-class CallbackRepeater;
class PredictedVsyncTracer;
// TODO (b/145217110): consider renaming.
@@ -52,15 +51,8 @@
void beginResync() final;
bool addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
bool* periodFlushed) final;
- void endResync() final;
-
- status_t addEventListener(const char* name, nsecs_t phase, DispSync::Callback* callback,
- nsecs_t lastCallbackTime) final;
- status_t removeEventListener(DispSync::Callback* callback, nsecs_t* outLastCallback) final;
- status_t changePhaseOffset(DispSync::Callback* callback, nsecs_t phase) final;
void dump(std::string& result) const final;
- void reset() final;
private:
void setIgnorePresentFencesInternal(bool ignoration) REQUIRES(mMutex);
@@ -85,9 +77,6 @@
std::optional<nsecs_t> mPeriodTransitioningTo GUARDED_BY(mMutex);
std::optional<nsecs_t> mLastHwVsync GUARDED_BY(mMutex);
- std::unordered_map<DispSync::Callback*, std::unique_ptr<CallbackRepeater>> mCallbacks
- GUARDED_BY(mMutex);
-
const std::unique_ptr<PredictedVsyncTracer> mPredictedVsyncTracer;
const bool mSupportKernelIdleTimer = false;
};
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 2b642a2..b30b0ec 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1452,7 +1452,11 @@
status_t SurfaceFlinger::injectVSync(nsecs_t when) {
Mutex::Autolock lock(mStateLock);
- return mScheduler->injectVSync(when, calculateExpectedPresentTime(when)) ? NO_ERROR : BAD_VALUE;
+ const auto expectedPresent = calculateExpectedPresentTime(when);
+ return mScheduler->injectVSync(when, /*expectedVSyncTime=*/expectedPresent,
+ /*deadlineTimestamp=*/expectedPresent)
+ ? NO_ERROR
+ : BAD_VALUE;
}
status_t SurfaceFlinger::getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) {
@@ -2984,14 +2988,16 @@
// start the EventThread
mScheduler = getFactory().createScheduler(*mRefreshRateConfigs, *this);
+ const auto configs = mVsyncConfiguration->getCurrentConfigs();
mAppConnectionHandle =
mScheduler->createConnection("app",
- mVsyncConfiguration->getCurrentConfigs().late.appOffset,
+ /*workDuration=*/configs.late.appWorkDuration,
+ /*readyDuration=*/configs.late.sfWorkDuration,
impl::EventThread::InterceptVSyncsCallback());
mSfConnectionHandle =
mScheduler->createConnection("sf",
- mVsyncConfiguration->getCurrentConfigs().late.sfOffset,
- [this](nsecs_t timestamp) {
+ /*workDuration=*/configs.late.sfWorkDuration,
+ /*readyDuration=*/0ns, [this](nsecs_t timestamp) {
mInterceptor->saveVSyncEvent(timestamp);
});
@@ -3024,8 +3030,12 @@
}
void SurfaceFlinger::setVsyncConfig(const VsyncModulator::VsyncConfig& config) {
- mScheduler->setPhaseOffset(mAppConnectionHandle, config.appOffset);
- mScheduler->setPhaseOffset(mSfConnectionHandle, config.sfOffset);
+ mScheduler->setDuration(mAppConnectionHandle,
+ /*workDuration=*/config.appWorkDuration,
+ /*readyDuration=*/config.sfWorkDuration);
+ mScheduler->setDuration(mSfConnectionHandle,
+ /*workDuration=*/config.sfWorkDuration,
+ /*readyDuration=*/0ns);
}
void SurfaceFlinger::commitTransaction() {
@@ -5170,14 +5180,14 @@
mForceFullDamage = n != 0;
return NO_ERROR;
}
- case 1018: { // Modify Choreographer's phase offset
+ case 1018: { // Modify Choreographer's duration
n = data.readInt32();
- mScheduler->setPhaseOffset(mAppConnectionHandle, static_cast<nsecs_t>(n));
+ mScheduler->setDuration(mAppConnectionHandle, std::chrono::nanoseconds(n), 0ns);
return NO_ERROR;
}
- case 1019: { // Modify SurfaceFlinger's phase offset
+ case 1019: { // Modify SurfaceFlinger's duration
n = data.readInt32();
- mScheduler->setPhaseOffset(mSfConnectionHandle, static_cast<nsecs_t>(n));
+ mScheduler->setDuration(mSfConnectionHandle, std::chrono::nanoseconds(n), 0ns);
return NO_ERROR;
}
case 1020: { // Layer updates interceptor
diff --git a/services/surfaceflinger/TracedOrdinal.h b/services/surfaceflinger/TracedOrdinal.h
index 4e7f67d..49cf80c 100644
--- a/services/surfaceflinger/TracedOrdinal.h
+++ b/services/surfaceflinger/TracedOrdinal.h
@@ -21,12 +21,32 @@
#include <cmath>
#include <string>
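+// Overload std::signbit for std::chrono durations so TracedOrdinal can detect negative
+// duration values.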
+namespace std {
+template <class Rep, class Period>
+bool signbit(std::chrono::duration<Rep, Period> v) {
+ return signbit(std::chrono::duration_cast<std::chrono::nanoseconds>(v).count());
+}
+} // namespace std
+
namespace android {
+namespace {
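+// Convert the traced value to a raw int64_t for ATRACE_INT64; chrono durations are traced as
+// their tick count.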
+template <typename T>
+int64_t to_int64(T v) {
+ return int64_t(v);
+}
+
+template <class Rep, class Period>
+int64_t to_int64(std::chrono::duration<Rep, Period> v) {
+ return int64_t(v.count());
+}
+} // namespace
+
template <typename T>
class TracedOrdinal {
public:
- static_assert(std::is_same<bool, T>() || (std::is_signed<T>() && std::is_integral<T>()),
+ static_assert(std::is_same<bool, T>() || (std::is_signed<T>() && std::is_integral<T>()) ||
+ std::is_same<std::chrono::nanoseconds, T>(),
"Type is not supported. Please test it with systrace before adding "
"it to the list.");
@@ -57,12 +77,12 @@
}
if (!std::signbit(mData)) {
- ATRACE_INT64(mName.c_str(), int64_t(mData));
+ ATRACE_INT64(mName.c_str(), to_int64(mData));
if (mHasGoneNegative) {
ATRACE_INT64(mNameNegative.c_str(), 0);
}
} else {
- ATRACE_INT64(mNameNegative.c_str(), -int64_t(mData));
+ ATRACE_INT64(mNameNegative.c_str(), -to_int64(mData));
ATRACE_INT64(mName.c_str(), 0);
}
}
diff --git a/services/surfaceflinger/tests/unittests/DispSyncSourceTest.cpp b/services/surfaceflinger/tests/unittests/DispSyncSourceTest.cpp
index afebc40..54f4c7c 100644
--- a/services/surfaceflinger/tests/unittests/DispSyncSourceTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DispSyncSourceTest.cpp
@@ -27,13 +27,99 @@
#include "AsyncCallRecorder.h"
#include "Scheduler/DispSyncSource.h"
-#include "mock/MockDispSync.h"
+#include "Scheduler/VSyncDispatch.h"
namespace android {
namespace {
using namespace std::chrono_literals;
-using testing::Return;
+using namespace testing;
+
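+// Test double for VSyncDispatch: records the timing of each schedule() request and fires the
+// registered callbacks from triggerCallbacks(), deriving the wakeup time from the durations.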
+class MockVSyncDispatch : public scheduler::VSyncDispatch {
+public:
+ MOCK_METHOD2(registerCallback,
+ CallbackToken(std::function<void(nsecs_t, nsecs_t, nsecs_t)> const&, std::string));
+ MOCK_METHOD1(unregisterCallback, void(CallbackToken));
+ MOCK_METHOD2(schedule, scheduler::ScheduleResult(CallbackToken, ScheduleTiming));
+ MOCK_METHOD1(cancel, scheduler::CancelResult(CallbackToken token));
+ MOCK_CONST_METHOD1(dump, void(std::string&));
+
+ MockVSyncDispatch() {
+ ON_CALL(*this, registerCallback)
+ .WillByDefault(
+ [this](std::function<void(nsecs_t, nsecs_t, nsecs_t)> const& callback,
+ std::string) {
+ CallbackToken token(mNextToken);
+ mNextToken++;
+
+ mCallbacks.emplace(token, CallbackData(callback));
+ ALOGD("registerCallback: %zu", token.value());
+ return token;
+ });
+
+ ON_CALL(*this, unregisterCallback).WillByDefault([this](CallbackToken token) {
+ ALOGD("unregisterCallback: %zu", token.value());
+ mCallbacks.erase(token);
+ });
+
+ ON_CALL(*this, schedule).WillByDefault([this](CallbackToken token, ScheduleTiming timing) {
+ ALOGD("schedule: %zu", token.value());
+ if (mCallbacks.count(token) == 0) {
+ ALOGD("schedule: callback %zu not registered", token.value());
+ return scheduler::ScheduleResult::Error;
+ }
+
+ auto& callback = mCallbacks.at(token);
+ callback.scheduled = true;
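+            // Mirror the real dispatch: wake up workDuration + readyDuration before the vsync,
+            // with the ready deadline readyDuration before it.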
+ callback.vsyncTime = timing.earliestVsync;
+            callback.targetWakeupTime =
+                    timing.earliestVsync - timing.workDuration - timing.readyDuration;
+            callback.readyTime = timing.earliestVsync - timing.readyDuration;
+ ALOGD("schedule: callback %zu scheduled", token.value());
+ return scheduler::ScheduleResult::Scheduled;
+ });
+
+ ON_CALL(*this, cancel).WillByDefault([this](CallbackToken token) {
+ ALOGD("cancel: %zu", token.value());
+ if (mCallbacks.count(token) == 0) {
+ ALOGD("cancel: callback %zu is not registered", token.value());
+ return scheduler::CancelResult::Error;
+ }
+
+ auto& callback = mCallbacks.at(token);
+ callback.scheduled = false;
+ ALOGD("cancel: callback %zu cancelled", token.value());
+ return scheduler::CancelResult::Cancelled;
+ });
+ }
+
+ void triggerCallbacks() {
+ ALOGD("triggerCallbacks");
+ for (auto& [token, callback] : mCallbacks) {
+ if (callback.scheduled) {
+ ALOGD("triggerCallbacks: callback %zu", token.value());
+ callback.scheduled = false;
+ callback.func(callback.vsyncTime, callback.targetWakeupTime, callback.readyTime);
+ } else {
+ ALOGD("triggerCallbacks: callback %zu is not scheduled", token.value());
+ }
+ }
+ }
+
+private:
+ struct CallbackData {
+ explicit CallbackData(std::function<void(nsecs_t, nsecs_t, nsecs_t)> func)
+ : func(std::move(func)) {}
+
+ std::function<void(nsecs_t, nsecs_t, nsecs_t)> func;
+ bool scheduled = false;
+ nsecs_t vsyncTime = 0;
+ nsecs_t targetWakeupTime = 0;
+ nsecs_t readyTime = 0;
+ };
+
+ std::unordered_map<CallbackToken, CallbackData> mCallbacks;
+    size_t mNextToken = 0;
+};
class DispSyncSourceTest : public testing::Test, private VSyncSource::Callback {
protected:
@@ -43,15 +129,19 @@
void createDispSync();
void createDispSyncSource();
- void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp) override;
+ void onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+ nsecs_t deadlineTimestamp) override;
- std::unique_ptr<mock::DispSync> mDispSync;
- std::unique_ptr<DispSyncSource> mDispSyncSource;
+ std::unique_ptr<MockVSyncDispatch> mVSyncDispatch;
+ std::unique_ptr<scheduler::DispSyncSource> mDispSyncSource;
- AsyncCallRecorder<void (*)(nsecs_t)> mVSyncEventCallRecorder;
+ AsyncCallRecorder<void (*)(nsecs_t, nsecs_t, nsecs_t)> mVSyncEventCallRecorder;
- static constexpr std::chrono::nanoseconds mPhaseOffset = 6ms;
+ static constexpr std::chrono::nanoseconds mWorkDuration = 20ms;
+ static constexpr std::chrono::nanoseconds mReadyDuration = 10ms;
static constexpr int mIterations = 100;
+ const scheduler::VSyncDispatch::CallbackToken mFakeToken{2398};
+ const std::string mName = "DispSyncSourceTest";
};
DispSyncSourceTest::DispSyncSourceTest() {
@@ -66,20 +156,21 @@
ALOGD("**** Tearing down after %s.%s\n", test_info->test_case_name(), test_info->name());
}
-void DispSyncSourceTest::onVSyncEvent(nsecs_t when, nsecs_t /*expectedVSyncTimestamp*/) {
+void DispSyncSourceTest::onVSyncEvent(nsecs_t when, nsecs_t expectedVSyncTimestamp,
+ nsecs_t deadlineTimestamp) {
ALOGD("onVSyncEvent: %" PRId64, when);
- mVSyncEventCallRecorder.recordCall(when);
+ mVSyncEventCallRecorder.recordCall(when, expectedVSyncTimestamp, deadlineTimestamp);
}
void DispSyncSourceTest::createDispSync() {
- mDispSync = std::make_unique<mock::DispSync>();
+ mVSyncDispatch = std::make_unique<MockVSyncDispatch>();
}
void DispSyncSourceTest::createDispSyncSource() {
- createDispSync();
- mDispSyncSource = std::make_unique<DispSyncSource>(mDispSync.get(), mPhaseOffset.count(), true,
- "DispSyncSourceTest");
+ mDispSyncSource =
+ std::make_unique<scheduler::DispSyncSource>(*mVSyncDispatch, mWorkDuration,
+ mReadyDuration, true, mName.c_str());
mDispSyncSource->setCallback(this);
}
@@ -89,57 +180,119 @@
TEST_F(DispSyncSourceTest, createDispSync) {
createDispSync();
- EXPECT_TRUE(mDispSync);
+ EXPECT_TRUE(mVSyncDispatch);
}
TEST_F(DispSyncSourceTest, createDispSyncSource) {
+ createDispSync();
+
+ InSequence seq;
+ EXPECT_CALL(*mVSyncDispatch, registerCallback(_, mName)).WillOnce(Return(mFakeToken));
+ EXPECT_CALL(*mVSyncDispatch, cancel(mFakeToken))
+ .WillOnce(Return(scheduler::CancelResult::Cancelled));
+ EXPECT_CALL(*mVSyncDispatch, unregisterCallback(mFakeToken)).WillOnce(Return());
createDispSyncSource();
+
EXPECT_TRUE(mDispSyncSource);
}
TEST_F(DispSyncSourceTest, noCallbackAfterInit) {
+ createDispSync();
+
+ InSequence seq;
+ EXPECT_CALL(*mVSyncDispatch, registerCallback(_, mName)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch, cancel(_)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch, unregisterCallback(_)).Times(1);
createDispSyncSource();
+
EXPECT_TRUE(mDispSyncSource);
// DispSyncSource starts with Vsync disabled
- mDispSync->triggerCallback();
+ mVSyncDispatch->triggerCallbacks();
EXPECT_FALSE(mVSyncEventCallRecorder.waitForUnexpectedCall().has_value());
}
TEST_F(DispSyncSourceTest, waitForCallbacks) {
+ createDispSync();
+
+ InSequence seq;
+ EXPECT_CALL(*mVSyncDispatch, registerCallback(_, mName)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch,
+ schedule(_, Truly([&](auto timings) {
+ return timings.workDuration == mWorkDuration.count() &&
+ timings.readyDuration == mReadyDuration.count();
+ })))
+ .Times(mIterations + 1);
+ EXPECT_CALL(*mVSyncDispatch, cancel(_)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch, unregisterCallback(_)).Times(1);
createDispSyncSource();
+
EXPECT_TRUE(mDispSyncSource);
mDispSyncSource->setVSyncEnabled(true);
- EXPECT_EQ(mDispSync->getCallbackPhase(), mPhaseOffset.count());
-
for (int i = 0; i < mIterations; i++) {
- mDispSync->triggerCallback();
- EXPECT_TRUE(mVSyncEventCallRecorder.waitForCall().has_value());
+ mVSyncDispatch->triggerCallbacks();
+ const auto callbackData = mVSyncEventCallRecorder.waitForCall();
+ ASSERT_TRUE(callbackData.has_value());
+ const auto [when, expectedVSyncTimestamp, deadlineTimestamp] = callbackData.value();
+ EXPECT_EQ(when, expectedVSyncTimestamp - mWorkDuration.count() - mReadyDuration.count());
}
}
-TEST_F(DispSyncSourceTest, waitForCallbacksWithPhaseChange) {
+TEST_F(DispSyncSourceTest, waitForCallbacksWithDurationChange) {
+ createDispSync();
+
+ InSequence seq;
+ EXPECT_CALL(*mVSyncDispatch, registerCallback(_, mName)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch,
+ schedule(_, Truly([&](auto timings) {
+ return timings.workDuration == mWorkDuration.count() &&
+ timings.readyDuration == mReadyDuration.count();
+ })))
+ .Times(1);
+
createDispSyncSource();
+
EXPECT_TRUE(mDispSyncSource);
mDispSyncSource->setVSyncEnabled(true);
- EXPECT_EQ(mDispSync->getCallbackPhase(), mPhaseOffset.count());
-
+ EXPECT_CALL(*mVSyncDispatch,
+ schedule(_, Truly([&](auto timings) {
+ return timings.workDuration == mWorkDuration.count() &&
+ timings.readyDuration == mReadyDuration.count();
+ })))
+ .Times(mIterations);
for (int i = 0; i < mIterations; i++) {
- mDispSync->triggerCallback();
- EXPECT_TRUE(mVSyncEventCallRecorder.waitForCall().has_value());
+ mVSyncDispatch->triggerCallbacks();
+ const auto callbackData = mVSyncEventCallRecorder.waitForCall();
+ ASSERT_TRUE(callbackData.has_value());
+ const auto [when, expectedVSyncTimestamp, deadlineTimestamp] = callbackData.value();
+ EXPECT_EQ(when, expectedVSyncTimestamp - mWorkDuration.count() - mReadyDuration.count());
}
- EXPECT_CALL(*mDispSync, getPeriod()).Times(1).WillOnce(Return(16666666));
- mDispSyncSource->setPhaseOffset((mPhaseOffset / 2).count());
+ const auto newDuration = mWorkDuration / 2;
+ EXPECT_CALL(*mVSyncDispatch, schedule(_, Truly([&](auto timings) {
+ return timings.workDuration == newDuration.count() &&
+ timings.readyDuration == 0;
+ })))
+ .Times(1);
+ mDispSyncSource->setDuration(newDuration, 0ns);
- EXPECT_EQ(mDispSync->getCallbackPhase(), (mPhaseOffset / 2).count());
-
+ EXPECT_CALL(*mVSyncDispatch, schedule(_, Truly([&](auto timings) {
+ return timings.workDuration == newDuration.count() &&
+ timings.readyDuration == 0;
+ })))
+ .Times(mIterations);
for (int i = 0; i < mIterations; i++) {
- mDispSync->triggerCallback();
- EXPECT_TRUE(mVSyncEventCallRecorder.waitForCall().has_value());
+ mVSyncDispatch->triggerCallbacks();
+ const auto callbackData = mVSyncEventCallRecorder.waitForCall();
+ ASSERT_TRUE(callbackData.has_value());
+ const auto [when, expectedVSyncTimestamp, deadlineTimestamp] = callbackData.value();
+ EXPECT_EQ(when, expectedVSyncTimestamp - newDuration.count());
}
+
+ EXPECT_CALL(*mVSyncDispatch, cancel(_)).Times(1);
+ EXPECT_CALL(*mVSyncDispatch, unregisterCallback(_)).Times(1);
}
} // namespace
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index 7fade0d..b9c9fe7 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -1325,8 +1325,6 @@
// The call disables vsyncs
EXPECT_CALL(mSchedulerCallback, setVsyncEnabled(false)).Times(1);
- // The call ends any display resyncs
- EXPECT_CALL(*mPrimaryDispSync, endResync()).Times(1);
// --------------------------------------------------------------------
// Invocation
@@ -3265,16 +3263,11 @@
EXPECT_CALL(*test->mPrimaryDispSync, setPeriod(DEFAULT_REFRESH_RATE)).Times(1);
EXPECT_CALL(*test->mPrimaryDispSync, beginResync()).Times(1);
}
-
- static void setupEndResyncCallExpectations(DisplayTransactionTest* test) {
- EXPECT_CALL(*test->mPrimaryDispSync, endResync()).Times(1);
- }
};
struct DispSyncNotSupportedVariant {
static void setupBeginResyncCallExpectations(DisplayTransactionTest* /* test */) {}
- static void setupEndResyncCallExpectations(DisplayTransactionTest* /* test */) {}
};
// --------------------------------------------------------------------
@@ -3326,7 +3319,6 @@
template <typename Case>
static void setupCallExpectations(DisplayTransactionTest* test) {
Case::EventThread::setupReleaseAndDisableVsyncCallExpectations(test);
- Case::DispSync::setupEndResyncCallExpectations(test);
Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::OFF);
}
@@ -3389,7 +3381,6 @@
template <typename Case>
static void setupCallExpectations(DisplayTransactionTest* test) {
Case::EventThread::setupReleaseAndDisableVsyncCallExpectations(test);
- Case::DispSync::setupEndResyncCallExpectations(test);
Case::setupComposerCallExpectations(test, Case::Doze::ACTUAL_POWER_MODE_FOR_DOZE_SUSPEND);
}
};
diff --git a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
index aab6d01..ae94f16 100644
--- a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
+++ b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
@@ -46,7 +46,9 @@
MOCK_METHOD1(setVSyncEnabled, void(bool));
MOCK_METHOD1(setCallback, void(VSyncSource::Callback*));
- MOCK_METHOD1(setPhaseOffset, void(nsecs_t));
+ MOCK_METHOD2(setDuration,
+ void(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration));
MOCK_METHOD1(pauseVsyncCallback, void(bool));
MOCK_CONST_METHOD1(dump, void(std::string&));
};
@@ -74,7 +76,8 @@
ISurfaceComposer::ConfigChanged configChanged);
void expectVSyncSetEnabledCallReceived(bool expectedState);
- void expectVSyncSetPhaseOffsetCallReceived(nsecs_t expectedPhaseOffset);
+ void expectVSyncSetDurationCallReceived(std::chrono::nanoseconds expectedDuration,
+ std::chrono::nanoseconds expectedReadyDuration);
VSyncSource::Callback* expectVSyncSetCallbackCallReceived();
void expectInterceptCallReceived(nsecs_t expectedTimestamp);
void expectVsyncEventReceivedByConnection(const char* name,
@@ -89,7 +92,8 @@
AsyncCallRecorder<void (*)(bool)> mVSyncSetEnabledCallRecorder;
AsyncCallRecorder<void (*)(VSyncSource::Callback*)> mVSyncSetCallbackCallRecorder;
- AsyncCallRecorder<void (*)(nsecs_t)> mVSyncSetPhaseOffsetCallRecorder;
+ AsyncCallRecorder<void (*)(std::chrono::nanoseconds, std::chrono::nanoseconds)>
+ mVSyncSetDurationCallRecorder;
AsyncCallRecorder<void (*)()> mResyncCallRecorder;
AsyncCallRecorder<void (*)(nsecs_t)> mInterceptVSyncCallRecorder;
ConnectionEventRecorder mConnectionEventCallRecorder{0};
@@ -114,8 +118,8 @@
EXPECT_CALL(*mVSyncSource, setCallback(_))
.WillRepeatedly(Invoke(mVSyncSetCallbackCallRecorder.getInvocable()));
- EXPECT_CALL(*mVSyncSource, setPhaseOffset(_))
- .WillRepeatedly(Invoke(mVSyncSetPhaseOffsetCallRecorder.getInvocable()));
+ EXPECT_CALL(*mVSyncSource, setDuration(_, _))
+ .WillRepeatedly(Invoke(mVSyncSetDurationCallRecorder.getInvocable()));
createThread(std::move(vsyncSource));
mConnection = createConnection(mConnectionEventCallRecorder,
@@ -159,10 +163,12 @@
EXPECT_EQ(expectedState, std::get<0>(args.value()));
}
-void EventThreadTest::expectVSyncSetPhaseOffsetCallReceived(nsecs_t expectedPhaseOffset) {
- auto args = mVSyncSetPhaseOffsetCallRecorder.waitForCall();
+void EventThreadTest::expectVSyncSetDurationCallReceived(
+ std::chrono::nanoseconds expectedDuration, std::chrono::nanoseconds expectedReadyDuration) {
+ auto args = mVSyncSetDurationCallRecorder.waitForCall();
ASSERT_TRUE(args.has_value());
- EXPECT_EQ(expectedPhaseOffset, std::get<0>(args.value()));
+ EXPECT_EQ(expectedDuration, std::get<0>(args.value()));
+ EXPECT_EQ(expectedReadyDuration, std::get<1>(args.value()));
}
VSyncSource::Callback* EventThreadTest::expectVSyncSetCallbackCallReceived() {
@@ -229,7 +235,7 @@
TEST_F(EventThreadTest, canCreateAndDestroyThreadWithNoEventsSent) {
EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
EXPECT_FALSE(mVSyncSetCallbackCallRecorder.waitForCall(0us).has_value());
- EXPECT_FALSE(mVSyncSetPhaseOffsetCallRecorder.waitForCall(0us).has_value());
+ EXPECT_FALSE(mVSyncSetDurationCallRecorder.waitForCall(0us).has_value());
EXPECT_FALSE(mResyncCallRecorder.waitForCall(0us).has_value());
EXPECT_FALSE(mInterceptVSyncCallRecorder.waitForCall(0us).has_value());
EXPECT_FALSE(mConnectionEventCallRecorder.waitForCall(0us).has_value());
@@ -258,14 +264,14 @@
// Use the received callback to signal a first vsync event.
// The interceptor should receive the event, as well as the connection.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
expectVsyncEventReceivedByConnection(123, 1u);
// Use the received callback to signal a second vsync event.
// The interceptor should receive the event, but the connection should
// not as it was only interested in the first.
- mCallback->onVSyncEvent(456, 123);
+ mCallback->onVSyncEvent(456, 123, 0);
expectInterceptCallReceived(456);
EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
@@ -299,7 +305,7 @@
// Send a vsync event. EventThread should then make a call to the
// interceptor, and the second connection. The first connection should not
// get the event.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 0);
expectInterceptCallReceived(123);
EXPECT_FALSE(firstConnectionEventRecorder.waitForUnexpectedCall().has_value());
expectVsyncEventReceivedByConnection("secondConnection", secondConnectionEventRecorder, 123,
@@ -314,17 +320,17 @@
// Send a vsync event. EventThread should then make a call to the
// interceptor, and the connection.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
expectVsyncEventReceivedByConnection(123, 1u);
// A second event should go to the same places.
- mCallback->onVSyncEvent(456, 123);
+ mCallback->onVSyncEvent(456, 123, 0);
expectInterceptCallReceived(456);
expectVsyncEventReceivedByConnection(456, 2u);
// A third event should go to the same places.
- mCallback->onVSyncEvent(789, 777);
+ mCallback->onVSyncEvent(789, 777, 111);
expectInterceptCallReceived(789);
expectVsyncEventReceivedByConnection(789, 3u);
}
@@ -336,22 +342,22 @@
expectVSyncSetEnabledCallReceived(true);
// The first event will be seen by the interceptor, and not the connection.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
// The second event will be seen by the interceptor and the connection.
- mCallback->onVSyncEvent(456, 123);
+ mCallback->onVSyncEvent(456, 123, 0);
expectInterceptCallReceived(456);
expectVsyncEventReceivedByConnection(456, 2u);
// The third event will be seen by the interceptor, and not the connection.
- mCallback->onVSyncEvent(789, 777);
+ mCallback->onVSyncEvent(789, 777, 744);
expectInterceptCallReceived(789);
EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
// The fourth event will be seen by the interceptor and the connection.
- mCallback->onVSyncEvent(101112, 7847);
+ mCallback->onVSyncEvent(101112, 7847, 86);
expectInterceptCallReceived(101112);
expectVsyncEventReceivedByConnection(101112, 4u);
}
@@ -366,7 +372,7 @@
mConnection = nullptr;
// The first event will be seen by the interceptor, and not the connection.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
@@ -386,13 +392,13 @@
// The first event will be seen by the interceptor, and by the connection,
// which then returns an error.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 123, 1u);
// A subsequent event will be seen by the interceptor and not by the
// connection.
- mCallback->onVSyncEvent(456, 123);
+ mCallback->onVSyncEvent(456, 123, 0);
expectInterceptCallReceived(456);
EXPECT_FALSE(errorConnectionEventRecorder.waitForUnexpectedCall().has_value());
@@ -420,7 +426,7 @@
// The first event will be seen by the interceptor, and by the connection,
// which then returns an error.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 123, 1u);
expectVsyncEventReceivedByConnection("successConnection", secondConnectionEventRecorder, 123,
@@ -440,13 +446,13 @@
// The first event will be seen by the interceptor, and by the connection,
// which then returns a non-fatal error.
- mCallback->onVSyncEvent(123, 456);
+ mCallback->onVSyncEvent(123, 456, 789);
expectInterceptCallReceived(123);
expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 123, 1u);
// A subsequent event will be seen by the interceptor, and by the connection,
// which still returns a non-fatal error.
- mCallback->onVSyncEvent(456, 123);
+ mCallback->onVSyncEvent(456, 123, 0);
expectInterceptCallReceived(456);
expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 456, 2u);
@@ -455,8 +461,8 @@
}
-TEST_F(EventThreadTest, setPhaseOffsetForwardsToVSyncSource) {
+TEST_F(EventThreadTest, setDurationForwardsToVSyncSource) {
- mThread->setPhaseOffset(321);
- expectVSyncSetPhaseOffsetCallReceived(321);
+ mThread->setDuration(321ns, 456ns);
+ expectVSyncSetDurationCallReceived(321ns, 456ns);
}
TEST_F(EventThreadTest, postHotplugInternalDisconnect) {
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index 39e793a..35619a3 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -120,8 +120,8 @@
mScheduler.dump(handle, output);
EXPECT_TRUE(output.empty());
- EXPECT_CALL(*mEventThread, setPhaseOffset(_)).Times(0);
- mScheduler.setPhaseOffset(handle, 10);
+ EXPECT_CALL(*mEventThread, setDuration(10ns, 20ns)).Times(0);
+ mScheduler.setDuration(handle, 10ns, 20ns);
}
TEST_F(SchedulerTest, validConnectionHandle) {
@@ -146,8 +146,8 @@
mScheduler.dump(mConnectionHandle, output);
EXPECT_FALSE(output.empty());
- EXPECT_CALL(*mEventThread, setPhaseOffset(10)).Times(1);
- mScheduler.setPhaseOffset(mConnectionHandle, 10);
+ EXPECT_CALL(*mEventThread, setDuration(10ns, 20ns)).Times(1);
+ mScheduler.setDuration(mConnectionHandle, 10ns, 20ns);
static constexpr size_t kEventConnections = 5;
EXPECT_CALL(*mEventThread, getEventThreadConnectionCount()).WillOnce(Return(kEventConnections));
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
index c2a7752..3408fed 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -104,14 +104,18 @@
class RepeatingCallbackReceiver {
public:
- RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t wl)
- : mWorkload(wl),
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t workload, nsecs_t readyDuration)
+ : mWorkload(workload),
+ mReadyDuration(readyDuration),
mCallback(
- dispatch, [&](auto time, auto) { callback_called(time); }, "repeat0") {}
+ dispatch, [&](auto time, auto, auto) { callback_called(time); }, "repeat0") {}
void repeatedly_schedule(size_t iterations, std::function<void(nsecs_t)> const& onEachFrame) {
mCallbackTimes.reserve(iterations);
- mCallback.schedule(mWorkload, systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload);
+ mCallback.schedule(
+ {.workDuration = mWorkload,
+ .readyDuration = mReadyDuration,
+ .earliestVsync = systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload + mReadyDuration});
for (auto i = 0u; i < iterations - 1; i++) {
std::unique_lock<decltype(mMutex)> lk(mMutex);
@@ -122,7 +126,9 @@
onEachFrame(last);
- mCallback.schedule(mWorkload, last + mWorkload);
+ mCallback.schedule({.workDuration = mWorkload,
+ .readyDuration = mReadyDuration,
+ .earliestVsync = last + mWorkload + mReadyDuration});
}
// wait for the last callback.
@@ -144,6 +150,7 @@
}
nsecs_t const mWorkload;
+ nsecs_t const mReadyDuration;
VSyncCallbackRegistration mCallback;
std::mutex mMutex;
@@ -160,9 +167,9 @@
static size_t constexpr num_clients = 3;
std::array<RepeatingCallbackReceiver, num_clients>
- cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us)),
- RepeatingCallbackReceiver(dispatch, toNs(0h)),
- RepeatingCallbackReceiver(dispatch, toNs(1ms))};
+ cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us), toNs(2500us)),
+ RepeatingCallbackReceiver(dispatch, toNs(0h), toNs(0h)),
+ RepeatingCallbackReceiver(dispatch, toNs(1ms), toNs(3ms))};
auto const on_each_frame = [](nsecs_t) {};
std::array<std::thread, num_clients> threads{
@@ -187,7 +194,7 @@
VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
mVsyncMoveThreshold);
- RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
auto const on_each_frame = [&](nsecs_t last_known) {
tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
@@ -205,7 +212,7 @@
VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
mVsyncMoveThreshold);
- RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
auto jump_frame_counter = 0u;
auto constexpr jump_frame_at = 10u;
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
index f630e3b..24e243f 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
@@ -109,22 +109,24 @@
CountingCallback(VSyncDispatch& dispatch)
: mDispatch(dispatch),
mToken(dispatch.registerCallback(std::bind(&CountingCallback::counter, this,
- std::placeholders::_1,
- std::placeholders::_2),
+ std::placeholders::_1, std::placeholders::_2,
+ std::placeholders::_3),
"test")) {}
~CountingCallback() { mDispatch.unregisterCallback(mToken); }
operator VSyncDispatch::CallbackToken() const { return mToken; }
- void counter(nsecs_t time, nsecs_t wakeup_time) {
+ void counter(nsecs_t time, nsecs_t wakeup_time, nsecs_t readyTime) {
mCalls.push_back(time);
mWakeupTime.push_back(wakeup_time);
+ mReadyTime.push_back(readyTime);
}
VSyncDispatch& mDispatch;
VSyncDispatch::CallbackToken mToken;
std::vector<nsecs_t> mCalls;
std::vector<nsecs_t> mWakeupTime;
+ std::vector<nsecs_t> mReadyTime;
};
class PausingCallback {
@@ -228,7 +230,11 @@
VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
mVsyncMoveThreshold};
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 100, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
}
}
@@ -237,7 +243,11 @@
EXPECT_CALL(mMockClock, alarmAt(_, 900));
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 100, intended), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = intended}),
+ ScheduleResult::Scheduled);
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(1));
@@ -249,7 +259,7 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1050));
CountingCallback cb(mDispatch);
- mDispatch.schedule(cb, 100, mPeriod);
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(1));
@@ -265,7 +275,11 @@
EXPECT_CALL(mMockClock, alarmAt(_, mPeriod));
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, workDuration, mPeriod), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = workDuration,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod}),
+ ScheduleResult::Scheduled);
}
TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancel) {
@@ -273,7 +287,11 @@
EXPECT_CALL(mMockClock, alarmCancel());
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 100, mPeriod), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod}),
+ ScheduleResult::Scheduled);
EXPECT_EQ(mDispatch.cancel(cb), CancelResult::Cancelled);
}
@@ -282,7 +300,11 @@
EXPECT_CALL(mMockClock, alarmCancel());
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 100, mPeriod), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod}),
+ ScheduleResult::Scheduled);
mMockClock.advanceBy(950);
EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
}
@@ -292,7 +314,11 @@
EXPECT_CALL(mMockClock, alarmCancel());
PausingCallback cb(mDispatch, std::chrono::duration_cast<std::chrono::milliseconds>(1s));
- EXPECT_EQ(mDispatch.schedule(cb, 100, mPeriod), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod}),
+ ScheduleResult::Scheduled);
std::thread pausingThread([&] { mMockClock.advanceToNextCallback(); });
EXPECT_TRUE(cb.waitForPause());
@@ -309,7 +335,11 @@
PausingCallback cb(mDispatch, 50ms);
cb.stashResource(resource);
- EXPECT_EQ(mDispatch.schedule(cb, 100, mPeriod), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod}),
+ ScheduleResult::Scheduled);
std::thread pausingThread([&] { mMockClock.advanceToNextCallback(); });
EXPECT_TRUE(cb.waitForPause());
@@ -339,8 +369,8 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 100, mPeriod);
- mDispatch.schedule(cb1, 250, mPeriod);
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+ mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
advanceToNextCallback();
advanceToNextCallback();
@@ -367,8 +397,9 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 100, mPeriod * 10);
- mDispatch.schedule(cb1, 250, mPeriod);
+ mDispatch.schedule(cb0,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
+ mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
mDispatch.cancel(cb1);
}
@@ -380,9 +411,9 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 400, 1000);
- mDispatch.schedule(cb1, 200, 1000);
- mDispatch.schedule(cb1, 300, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
@@ -395,9 +426,9 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 400, 1000);
- mDispatch.schedule(cb1, 200, 1000);
- mDispatch.schedule(cb1, 500, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
@@ -415,9 +446,10 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 400, 1000);
- mDispatch.schedule(cb1, 200, 1000);
- mDispatch.schedule(cb1, closeOffset, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
ASSERT_THAT(cb0.mCalls.size(), Eq(1));
@@ -425,8 +457,9 @@
ASSERT_THAT(cb1.mCalls.size(), Eq(1));
EXPECT_THAT(cb1.mCalls[0], Eq(mPeriod));
- mDispatch.schedule(cb0, 400, 2000);
- mDispatch.schedule(cb1, notCloseOffset, 2000);
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb1,
+ {.workDuration = notCloseOffset, .readyDuration = 0, .earliestVsync = 2000});
advanceToNextCallback();
ASSERT_THAT(cb1.mCalls.size(), Eq(2));
EXPECT_THAT(cb1.mCalls[1], Eq(2000));
@@ -446,8 +479,8 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 100, 1000);
- mDispatch.schedule(cb1, 200, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
EXPECT_EQ(mDispatch.cancel(cb0), CancelResult::Cancelled);
}
@@ -460,18 +493,18 @@
.WillOnce(Return(2950));
CountingCallback cb(mDispatch);
- mDispatch.schedule(cb, 100, 920);
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
mMockClock.advanceBy(850);
EXPECT_THAT(cb.mCalls.size(), Eq(1));
- mDispatch.schedule(cb, 100, 1900);
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
mMockClock.advanceBy(900);
EXPECT_THAT(cb.mCalls.size(), Eq(1));
mMockClock.advanceBy(125);
EXPECT_THAT(cb.mCalls.size(), Eq(2));
- mDispatch.schedule(cb, 100, 2900);
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
mMockClock.advanceBy(975);
EXPECT_THAT(cb.mCalls.size(), Eq(3));
}
@@ -482,10 +515,16 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
VSyncDispatch::CallbackToken tmp;
- tmp = mDispatch.registerCallback([&](auto, auto) { mDispatch.schedule(tmp, 100, 2000); },
- "o.o");
+ tmp = mDispatch.registerCallback(
+ [&](auto, auto, auto) {
+ mDispatch.schedule(tmp,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = 2000});
+ },
+ "o.o");
- mDispatch.schedule(tmp, 100, 1000);
+ mDispatch.schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
@@ -493,17 +532,27 @@
VSyncDispatch::CallbackToken tmp;
std::optional<nsecs_t> lastTarget;
tmp = mDispatch.registerCallback(
- [&](auto timestamp, auto) {
- EXPECT_EQ(mDispatch.schedule(tmp, 400, timestamp - mVsyncMoveThreshold),
+ [&](auto timestamp, auto, auto) {
+ EXPECT_EQ(mDispatch.schedule(tmp,
+ {.workDuration = 400,
+ .readyDuration = 0,
+ .earliestVsync = timestamp - mVsyncMoveThreshold}),
ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(tmp, 400, timestamp), ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(tmp, 400, timestamp + mVsyncMoveThreshold),
+ EXPECT_EQ(mDispatch.schedule(tmp,
+ {.workDuration = 400,
+ .readyDuration = 0,
+ .earliestVsync = timestamp}),
+ ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(tmp,
+ {.workDuration = 400,
+ .readyDuration = 0,
+ .earliestVsync = timestamp + mVsyncMoveThreshold}),
ScheduleResult::Scheduled);
lastTarget = timestamp;
},
"oo");
- mDispatch.schedule(tmp, 999, 1000);
+ mDispatch.schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
EXPECT_THAT(lastTarget, Eq(1000));
@@ -519,16 +568,16 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
CountingCallback cb(mDispatch);
- mDispatch.schedule(cb, 0, 1000);
+ mDispatch.schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
mMockClock.advanceBy(750);
- mDispatch.schedule(cb, 50, 1000);
+ mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
- mDispatch.schedule(cb, 50, 2000);
+ mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
mMockClock.advanceBy(800);
- mDispatch.schedule(cb, 100, 2000);
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
}
TEST_F(VSyncDispatchTimerQueueTest, lateModifications) {
@@ -541,12 +590,12 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 500, 1000);
- mDispatch.schedule(cb1, 100, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
- mDispatch.schedule(cb0, 200, 2000);
- mDispatch.schedule(cb1, 150, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
advanceToNextCallback();
@@ -558,8 +607,8 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch.schedule(cb0, 500, 1000);
- mDispatch.schedule(cb1, 500, 20000);
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
}
TEST_F(VSyncDispatchTimerQueueTest, setsTimerAfterCancellation) {
@@ -569,31 +618,43 @@
EXPECT_CALL(mMockClock, alarmAt(_, 900)).InSequence(seq);
CountingCallback cb0(mDispatch);
- mDispatch.schedule(cb0, 500, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
mDispatch.cancel(cb0);
- mDispatch.schedule(cb0, 100, 1000);
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
}
TEST_F(VSyncDispatchTimerQueueTest, makingUpIdsError) {
VSyncDispatch::CallbackToken token(100);
- EXPECT_THAT(mDispatch.schedule(token, 100, 1000), Eq(ScheduleResult::Error));
+ EXPECT_THAT(mDispatch.schedule(token,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = 1000}),
+ Eq(ScheduleResult::Error));
EXPECT_THAT(mDispatch.cancel(token), Eq(CancelResult::Error));
}
TEST_F(VSyncDispatchTimerQueueTest, canMoveCallbackBackwardsInTime) {
CountingCallback cb0(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb0, 500, 1000), ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(cb0, 100, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
}
// b/1450138150
TEST_F(VSyncDispatchTimerQueueTest, doesNotMoveCallbackBackwardsAndSkipAScheduledTargetVSync) {
EXPECT_CALL(mMockClock, alarmAt(_, 500));
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 500, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
mMockClock.advanceBy(400);
- EXPECT_EQ(mDispatch.schedule(cb, 800, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(1));
}
@@ -604,16 +665,24 @@
.WillOnce(Return(1000))
.WillOnce(Return(1002));
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 500, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
mMockClock.advanceBy(400);
- EXPECT_EQ(mDispatch.schedule(cb, 400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
}
TEST_F(VSyncDispatchTimerQueueTest, canScheduleNegativeOffsetAgainstDifferentPeriods) {
CountingCallback cb0(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb0, 500, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
advanceToNextCallback();
- EXPECT_EQ(mDispatch.schedule(cb0, 1100, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
}
TEST_F(VSyncDispatchTimerQueueTest, canScheduleLargeNegativeOffset) {
@@ -621,18 +690,26 @@
EXPECT_CALL(mMockClock, alarmAt(_, 500)).InSequence(seq);
EXPECT_CALL(mMockClock, alarmAt(_, 1100)).InSequence(seq);
CountingCallback cb0(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb0, 500, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
advanceToNextCallback();
- EXPECT_EQ(mDispatch.schedule(cb0, 1900, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb0,
+ {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
}
TEST_F(VSyncDispatchTimerQueueTest, scheduleUpdatesDoesNotAffectSchedulingState) {
EXPECT_CALL(mMockClock, alarmAt(_, 600));
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(cb, 1400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
advanceToNextCallback();
}
@@ -642,12 +719,12 @@
EXPECT_CALL(mMockClock, alarmCancel()).Times(1);
VSyncCallbackRegistration cb(
- mDispatch, [](auto, auto) {}, "");
+ mDispatch, [](auto, auto, auto) {}, "");
VSyncCallbackRegistration cb1(std::move(cb));
- cb.schedule(100, 1000);
+ cb.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
cb.cancel();
- cb1.schedule(500, 1000);
+ cb1.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
cb1.cancel();
}
@@ -656,14 +733,14 @@
EXPECT_CALL(mMockClock, alarmCancel()).Times(1);
VSyncCallbackRegistration cb(
- mDispatch, [](auto, auto) {}, "");
+ mDispatch, [](auto, auto, auto) {}, "");
VSyncCallbackRegistration cb1(
- mDispatch, [](auto, auto) {}, "");
+ mDispatch, [](auto, auto, auto) {}, "");
cb1 = std::move(cb);
- cb.schedule(100, 1000);
+ cb.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
cb.cancel();
- cb1.schedule(500, 1000);
+ cb1.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
cb1.cancel();
}
@@ -675,12 +752,16 @@
CountingCallback cb1(mDispatch);
CountingCallback cb2(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb1, 400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- EXPECT_EQ(mDispatch.schedule(cb2, 100, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
mMockClock.advanceBy(80);
EXPECT_THAT(cb1.mCalls.size(), Eq(1));
@@ -696,12 +777,16 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1630)).InSequence(seq);
CountingCallback cb(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb, 400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- EXPECT_EQ(mDispatch.schedule(cb, 370, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
mMockClock.advanceBy(80);
EXPECT_THAT(cb.mCalls.size(), Eq(1));
@@ -715,8 +800,12 @@
CountingCallback cb1(mDispatch);
CountingCallback cb2(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb1, 400, 1000), ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(cb2, 100, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
@@ -737,8 +826,12 @@
CountingCallback cb1(mDispatch);
CountingCallback cb2(mDispatch);
- EXPECT_EQ(mDispatch.schedule(cb1, 400, 1000), ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(cb2, 100, 2000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000}),
+ ScheduleResult::Scheduled);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
@@ -766,16 +859,44 @@
.InSequence(seq)
.WillOnce(Return(1000));
- EXPECT_EQ(mDispatch.schedule(cb1, 400, 1000), ScheduleResult::Scheduled);
- EXPECT_EQ(mDispatch.schedule(cb2, 390, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb2,
+ {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000}),
+ ScheduleResult::Scheduled);
mMockClock.setLag(100);
mMockClock.advanceBy(700);
ASSERT_THAT(cb1.mWakeupTime.size(), Eq(1));
EXPECT_THAT(cb1.mWakeupTime[0], Eq(600));
+ ASSERT_THAT(cb1.mReadyTime.size(), Eq(1));
+ EXPECT_THAT(cb1.mReadyTime[0], Eq(1000));
ASSERT_THAT(cb2.mWakeupTime.size(), Eq(1));
EXPECT_THAT(cb2.mWakeupTime[0], Eq(610));
+ ASSERT_THAT(cb2.mReadyTime.size(), Eq(1));
+ EXPECT_THAT(cb2.mReadyTime[0], Eq(1000));
+}
+
+TEST_F(VSyncDispatchTimerQueueTest, basicAlarmSettingFutureWithReadyDuration) {
+ auto intended = mPeriod - 230;
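+    // The stub tracker snaps the intended time to the vsync at mPeriod (1000), so the wakeup is
+    // 1000 - 70 (work) - 30 (ready) = 900 and the ready deadline is 1000 - 30 = 970.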
+ EXPECT_CALL(mMockClock, alarmAt(_, 900));
+
+ CountingCallback cb(mDispatch);
+ EXPECT_EQ(mDispatch.schedule(cb,
+ {.workDuration = 70,
+ .readyDuration = 30,
+ .earliestVsync = intended}),
+ ScheduleResult::Scheduled);
+ advanceToNextCallback();
+
+ ASSERT_THAT(cb.mCalls.size(), Eq(1));
+ EXPECT_THAT(cb.mCalls[0], Eq(mPeriod));
+ ASSERT_THAT(cb.mWakeupTime.size(), Eq(1));
+    EXPECT_THAT(cb.mWakeupTime[0], Eq(900));
+    ASSERT_THAT(cb.mReadyTime.size(), Eq(1));
+    EXPECT_THAT(cb.mReadyTime[0], Eq(970));
}
class VSyncDispatchTimerQueueEntryTest : public testing::Test {
@@ -788,7 +909,7 @@
TEST_F(VSyncDispatchTimerQueueEntryTest, stateAfterInitialization) {
std::string name("basicname");
VSyncDispatchTimerQueueEntry entry(
- name, [](auto, auto) {}, mVsyncMoveThreshold);
+ name, [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_THAT(entry.name(), Eq(name));
EXPECT_FALSE(entry.lastExecutedVsyncTarget());
EXPECT_FALSE(entry.wakeupTime());
@@ -796,10 +917,12 @@
TEST_F(VSyncDispatchTimerQueueEntryTest, stateScheduling) {
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_FALSE(entry.wakeupTime());
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(*wakeup, Eq(900));
@@ -816,10 +939,12 @@
.Times(1)
.WillOnce(Return(10000));
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_FALSE(entry.wakeupTime());
- EXPECT_THAT(entry.schedule(500, 994, mStubTracker, now), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 994},
+ mStubTracker, now),
+ Eq(ScheduleResult::Scheduled));
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(*wakeup, Eq(9500));
@@ -829,21 +954,29 @@
auto callCount = 0;
auto vsyncCalledTime = 0;
auto wakeupCalledTime = 0;
+ auto readyCalledTime = 0;
VSyncDispatchTimerQueueEntry entry(
"test",
- [&](auto vsyncTime, auto wakeupTime) {
+ [&](auto vsyncTime, auto wakeupTime, auto readyTime) {
callCount++;
vsyncCalledTime = vsyncTime;
wakeupCalledTime = wakeupTime;
+ readyCalledTime = readyTime;
},
mVsyncMoveThreshold);
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(*wakeup, Eq(900));
- entry.callback(entry.executing(), *wakeup);
+ auto const ready = entry.readyTime();
+ ASSERT_TRUE(ready);
+ EXPECT_THAT(*ready, Eq(1000));
+
+ entry.callback(entry.executing(), *wakeup, *ready);
EXPECT_THAT(callCount, Eq(1));
EXPECT_THAT(vsyncCalledTime, Eq(mPeriod));
@@ -861,13 +994,15 @@
.WillOnce(Return(1020));
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_FALSE(entry.wakeupTime());
entry.update(mStubTracker, 0);
EXPECT_FALSE(entry.wakeupTime());
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
auto wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(wakeup, Eq(900));
@@ -880,8 +1015,10 @@
TEST_F(VSyncDispatchTimerQueueEntryTest, skipsUpdateIfJustScheduled) {
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
entry.update(mStubTracker, 0);
auto const wakeup = entry.wakeupTime();
@@ -891,24 +1028,35 @@
TEST_F(VSyncDispatchTimerQueueEntryTest, willSnapToNextTargettableVSync) {
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
entry.executing(); // 1000 is executing
// had 1000 not been executing, this could have been scheduled for time 800.
- EXPECT_THAT(entry.schedule(200, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
+ EXPECT_THAT(*entry.readyTime(), Eq(2000));
- EXPECT_THAT(entry.schedule(50, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
EXPECT_THAT(*entry.wakeupTime(), Eq(1950));
+ EXPECT_THAT(*entry.readyTime(), Eq(2000));
- EXPECT_THAT(entry.schedule(200, 1001, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 1001},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
+ EXPECT_THAT(*entry.readyTime(), Eq(2000));
}
TEST_F(VSyncDispatchTimerQueueEntryTest,
willRequestNextEstimateWhenSnappingToNextTargettableVSync) {
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
Sequence seq;
EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
@@ -921,35 +1069,85 @@
.InSequence(seq)
.WillOnce(Return(2000));
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
entry.executing(); // 1000 is executing
- EXPECT_THAT(entry.schedule(200, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
}
TEST_F(VSyncDispatchTimerQueueEntryTest, reportsScheduledIfStillTime) {
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
- EXPECT_THAT(entry.schedule(100, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
- EXPECT_THAT(entry.schedule(200, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
- EXPECT_THAT(entry.schedule(50, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
- EXPECT_THAT(entry.schedule(1200, 500, mStubTracker, 0), Eq(ScheduleResult::Scheduled));
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
+ EXPECT_THAT(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
+ EXPECT_THAT(entry.schedule({.workDuration = 1200, .readyDuration = 0, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
}
TEST_F(VSyncDispatchTimerQueueEntryTest, storesPendingUpdatesUntilUpdate) {
static constexpr auto effectualOffset = 200;
VSyncDispatchTimerQueueEntry entry(
- "test", [](auto, auto) {}, mVsyncMoveThreshold);
+ "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_FALSE(entry.hasPendingWorkloadUpdate());
- entry.addPendingWorkloadUpdate(100, 400);
- entry.addPendingWorkloadUpdate(effectualOffset, 700);
+ entry.addPendingWorkloadUpdate({.workDuration = 100, .readyDuration = 0, .earliestVsync = 400});
+ entry.addPendingWorkloadUpdate(
+ {.workDuration = effectualOffset, .readyDuration = 0, .earliestVsync = 400});
EXPECT_TRUE(entry.hasPendingWorkloadUpdate());
entry.update(mStubTracker, 0);
EXPECT_FALSE(entry.hasPendingWorkloadUpdate());
EXPECT_THAT(*entry.wakeupTime(), Eq(mPeriod - effectualOffset));
}
+TEST_F(VSyncDispatchTimerQueueEntryTest, runCallbackWithReadyDuration) {
+ auto callCount = 0;
+ auto vsyncCalledTime = 0;
+ auto wakeupCalledTime = 0;
+ auto readyCalledTime = 0;
+ VSyncDispatchTimerQueueEntry entry(
+ "test",
+ [&](auto vsyncTime, auto wakeupTime, auto readyTime) {
+ callCount++;
+ vsyncCalledTime = vsyncTime;
+ wakeupCalledTime = wakeupTime;
+ readyCalledTime = readyTime;
+ },
+ mVsyncMoveThreshold);
+
+ EXPECT_THAT(entry.schedule({.workDuration = 70, .readyDuration = 30, .earliestVsync = 500},
+ mStubTracker, 0),
+ Eq(ScheduleResult::Scheduled));
+ auto const wakeup = entry.wakeupTime();
+ ASSERT_TRUE(wakeup);
+ EXPECT_THAT(*wakeup, Eq(900));
+
+ auto const ready = entry.readyTime();
+ ASSERT_TRUE(ready);
+ EXPECT_THAT(*ready, Eq(970));
+
+ entry.callback(entry.executing(), *wakeup, *ready);
+
+ EXPECT_THAT(callCount, Eq(1));
+ EXPECT_THAT(vsyncCalledTime, Eq(mPeriod));
+ EXPECT_THAT(wakeupCalledTime, Eq(*wakeup));
+ EXPECT_FALSE(entry.wakeupTime());
+ auto lastCalledTarget = entry.lastExecutedVsyncTarget();
+ ASSERT_TRUE(lastCalledTarget);
+ EXPECT_THAT(*lastCalledTarget, Eq(mPeriod));
+}
+
} // namespace android::scheduler
// TODO(b/129481165): remove the #pragma below and fix conversion issues
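Note on the new timing model exercised by runCallbackWithReadyDuration above: an entry is woken workDuration + readyDuration before the predicted vsync, and its frame must be ready readyDuration before that vsync. The standalone sketch below is illustrative only (the struct and helper names are invented, not SurfaceFlinger API); it simply reproduces the arithmetic the test asserts on, assuming those semantics.

#include <cassert>
#include <cstdint>

using nsecs_t = int64_t;

// Mirrors the three fields passed to schedule() in the tests above.
struct ScheduleTiming {
    nsecs_t workDuration;
    nsecs_t readyDuration;
    nsecs_t earliestVsync;
};

// Wakeup happens workDuration + readyDuration before the target vsync.
constexpr nsecs_t wakeupTimeFor(nsecs_t vsyncTime, const ScheduleTiming& t) {
    return vsyncTime - t.workDuration - t.readyDuration;
}

// The frame must be ready readyDuration before the target vsync.
constexpr nsecs_t readyTimeFor(nsecs_t vsyncTime, const ScheduleTiming& t) {
    return vsyncTime - t.readyDuration;
}

int main() {
    // Values from runCallbackWithReadyDuration: the tracker predicts a vsync at
    // 1000ns; workDuration = 70 and readyDuration = 30 yield a wakeup at 900ns
    // and a ready deadline at 970ns.
    constexpr ScheduleTiming timing{.workDuration = 70, .readyDuration = 30, .earliestVsync = 500};
    assert(wakeupTimeFor(1000, timing) == 900);
    assert(readyTimeFor(1000, timing) == 970);
    return 0;
}
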
diff --git a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
index c5cddf3..0c1ae14 100644
--- a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
@@ -63,9 +63,9 @@
class MockVSyncDispatch : public VSyncDispatch {
public:
MOCK_METHOD2(registerCallback,
- CallbackToken(std::function<void(nsecs_t, nsecs_t)> const&, std::string));
+ CallbackToken(std::function<void(nsecs_t, nsecs_t, nsecs_t)> const&, std::string));
MOCK_METHOD1(unregisterCallback, void(CallbackToken));
- MOCK_METHOD3(schedule, ScheduleResult(CallbackToken, nsecs_t, nsecs_t));
+ MOCK_METHOD2(schedule, ScheduleResult(CallbackToken, ScheduleTiming));
MOCK_METHOD1(cancel, CancelResult(CallbackToken token));
MOCK_CONST_METHOD1(dump, void(std::string&));
};
@@ -92,22 +92,6 @@
return ft;
}
-class StubCallback : public DispSync::Callback {
-public:
- void onDispSyncEvent(nsecs_t when, nsecs_t /*expectedVSyncTimestamp*/) final {
- std::lock_guard<std::mutex> lk(mMutex);
- mLastCallTime = when;
- }
- std::optional<nsecs_t> lastCallTime() const {
- std::lock_guard<std::mutex> lk(mMutex);
- return mLastCallTime;
- }
-
-private:
- std::mutex mutable mMutex;
- std::optional<nsecs_t> mLastCallTime GUARDED_BY(mMutex);
-};
-
class VSyncReactorTest : public testing::Test {
protected:
VSyncReactorTest()
@@ -135,7 +119,6 @@
VSyncDispatch::CallbackToken const mFakeToken{2398};
nsecs_t lastCallbackTime = 0;
- StubCallback outerCb;
std::function<void(nsecs_t, nsecs_t)> innerCb;
VSyncReactor mReactor;
@@ -489,192 +473,6 @@
EXPECT_FALSE(mReactor.addResyncSample(time += newPeriod2, std::nullopt, &periodFlushed));
}
-static nsecs_t computeWorkload(nsecs_t period, nsecs_t phase) {
- return period - phase;
-}
-
-TEST_F(VSyncReactorTest, addEventListener) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).Times(2).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.removeEventListener(&outerCb, &lastCallbackTime);
-}
-
-TEST_F(VSyncReactorTest, addEventListenerTwiceChangesPhase) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(period, mAnotherPhase), _)) // mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.addEventListener(mName, mAnotherPhase, &outerCb, lastCallbackTime);
-}
-
-TEST_F(VSyncReactorTest, eventListenerGetsACallbackAndReschedules) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(DoAll(SaveArg<0>(&innerCb), Return(mFakeToken)));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(period, mPhase), mFakeVSyncTime))
- .Times(2)
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- ASSERT_TRUE(innerCb);
- innerCb(mFakeVSyncTime, mFakeWakeupTime);
- innerCb(mFakeVSyncTime, mFakeWakeupTime);
-}
-
-TEST_F(VSyncReactorTest, callbackTimestampDistributedIsWakeupTime) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, _))
- .InSequence(seq)
- .WillOnce(DoAll(SaveArg<0>(&innerCb), Return(mFakeToken)));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(period, mPhase), mFakeVSyncTime))
- .InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- ASSERT_TRUE(innerCb);
- innerCb(mFakeVSyncTime, mFakeWakeupTime);
- EXPECT_THAT(outerCb.lastCallTime(), Optional(mFakeWakeupTime));
-}
-
-TEST_F(VSyncReactorTest, eventListenersRemovedOnDestruction) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
-}
-
-// b/149221293
-TEST_F(VSyncReactorTest, selfRemovingEventListenerStopsCallbacks) {
- class SelfRemovingCallback : public DispSync::Callback {
- public:
- SelfRemovingCallback(VSyncReactor& vsr) : mVsr(vsr) {}
- void onDispSyncEvent(nsecs_t when, nsecs_t /*expectedVSyncTimestamp*/) final {
- mVsr.removeEventListener(this, &when);
- }
-
- private:
- VSyncReactor& mVsr;
- } selfRemover(mReactor);
-
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(DoAll(SaveArg<0>(&innerCb), Return(mFakeToken)));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).Times(2).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &selfRemover, lastCallbackTime);
- innerCb(0, 0);
-}
-
-TEST_F(VSyncReactorTest, addEventListenerChangePeriod) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(period, mAnotherPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockDispatch, cancel(mFakeToken)).InSequence(seq);
- EXPECT_CALL(*mMockDispatch, unregisterCallback(mFakeToken)).InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.addEventListener(mName, mAnotherPhase, &outerCb, lastCallbackTime);
-}
-
-TEST_F(VSyncReactorTest, changingPeriodChangesOffsetsOnNextCb) {
- static constexpr nsecs_t anotherPeriod = 23333;
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), mFakeNow))
- .InSequence(seq);
- EXPECT_CALL(*mMockTracker, setPeriod(anotherPeriod));
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(anotherPeriod, mPhase), mFakeNow))
- .InSequence(seq);
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
-
- bool periodFlushed = false;
- mReactor.setPeriod(anotherPeriod);
- EXPECT_TRUE(mReactor.addResyncSample(anotherPeriod, std::nullopt, &periodFlushed));
- EXPECT_FALSE(mReactor.addResyncSample(anotherPeriod * 2, std::nullopt, &periodFlushed));
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
-}
-
-TEST_F(VSyncReactorTest, offsetsAppliedOnNextOpportunity) {
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(DoAll(SaveArg<0>(&innerCb), Return(mFakeToken)));
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mPhase), _))
- .InSequence(seq)
- .WillOnce(Return(ScheduleResult::Scheduled));
-
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mAnotherPhase), _))
- .InSequence(seq)
- .WillOnce(Return(ScheduleResult::Scheduled));
-
- EXPECT_CALL(*mMockDispatch, schedule(mFakeToken, computeWorkload(period, mAnotherPhase), _))
- .InSequence(seq)
- .WillOnce(Return(ScheduleResult::Scheduled));
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.changePhaseOffset(&outerCb, mAnotherPhase);
- ASSERT_TRUE(innerCb);
- innerCb(mFakeVSyncTime, mFakeWakeupTime);
-}
-
-TEST_F(VSyncReactorTest, negativeOffsetsApplied) {
- nsecs_t const negativePhase = -4000;
- Sequence seq;
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .InSequence(seq)
- .WillOnce(Return(mFakeToken));
- EXPECT_CALL(*mMockDispatch,
- schedule(mFakeToken, computeWorkload(period, negativePhase), mFakeNow))
- .InSequence(seq);
- mReactor.addEventListener(mName, negativePhase, &outerCb, lastCallbackTime);
-}
-
TEST_F(VSyncReactorTest, beginResyncResetsModel) {
EXPECT_CALL(*mMockTracker, resetModel());
mReactor.beginResync();
@@ -734,42 +532,4 @@
EXPECT_TRUE(idleReactor.addPresentFence(generateSignalledFenceWithTime(0)));
}
-using VSyncReactorDeathTest = VSyncReactorTest;
-TEST_F(VSyncReactorDeathTest, invalidRemoval) {
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.removeEventListener(&outerCb, &lastCallbackTime);
- EXPECT_DEATH(mReactor.removeEventListener(&outerCb, &lastCallbackTime), ".*");
-}
-
-TEST_F(VSyncReactorDeathTest, invalidChange) {
- EXPECT_DEATH(mReactor.changePhaseOffset(&outerCb, mPhase), ".*");
-
- // the current DispSync-interface usage pattern has evolved around an implementation quirk,
- // which is a callback is assumed to always exist, and it is valid api usage to change the
- // offset of an object that is in the removed state.
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- mReactor.removeEventListener(&outerCb, &lastCallbackTime);
- mReactor.changePhaseOffset(&outerCb, mPhase);
-}
-
-TEST_F(VSyncReactorDeathTest, cannotScheduleOnRegistration) {
- ON_CALL(*mMockDispatch, schedule(_, _, _))
- .WillByDefault(Return(ScheduleResult::CannotSchedule));
- EXPECT_DEATH(mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime), ".*");
-}
-
-TEST_F(VSyncReactorDeathTest, cannotScheduleOnCallback) {
- EXPECT_CALL(*mMockDispatch, registerCallback(_, std::string(mName)))
- .WillOnce(DoAll(SaveArg<0>(&innerCb), Return(mFakeToken)));
- EXPECT_CALL(*mMockDispatch, schedule(_, _, _)).WillOnce(Return(ScheduleResult::Scheduled));
-
- mReactor.addEventListener(mName, mPhase, &outerCb, lastCallbackTime);
- ASSERT_TRUE(innerCb);
- Mock::VerifyAndClearExpectations(mMockDispatch.get());
-
- ON_CALL(*mMockDispatch, schedule(_, _, _))
- .WillByDefault(Return(ScheduleResult::CannotSchedule));
- EXPECT_DEATH(innerCb(mFakeVSyncTime, mFakeWakeupTime), ".*");
-}
-
} // namespace android::scheduler
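Because MockVSyncDispatch::schedule now takes a single ScheduleTiming argument (MOCK_METHOD2 above) instead of separate workload and earliestVsync values, expectations in tests need to match the struct as a whole. The sketch below shows one way to do that with gMock; it is a hedged illustration only, assuming ScheduleTiming exposes the three fields used throughout these tests and defines no operator== (the matcher name is invented).

#include <cstdint>
#include <gmock/gmock.h>

// Assumed field layout, matching the designated initializers used in the tests.
struct ScheduleTiming {
    int64_t workDuration;
    int64_t readyDuration;
    int64_t earliestVsync;
};

// Field-by-field matcher for EXPECT_CALL on the new single-argument schedule().
MATCHER_P(ScheduleTimingEq, expected, "") {
    return arg.workDuration == expected.workDuration &&
           arg.readyDuration == expected.readyDuration &&
           arg.earliestVsync == expected.earliestVsync;
}

// Example expectation (token and mock names are placeholders):
//   EXPECT_CALL(*mMockDispatch,
//               schedule(mFakeToken,
//                        ScheduleTimingEq(ScheduleTiming{
//                                .workDuration = 100, .readyDuration = 0, .earliestVsync = 500})));
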
diff --git a/services/surfaceflinger/tests/unittests/mock/MockDispSync.cpp b/services/surfaceflinger/tests/unittests/mock/MockDispSync.cpp
index 1c8c44d..bbd9f77 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockDispSync.cpp
+++ b/services/surfaceflinger/tests/unittests/mock/MockDispSync.cpp
@@ -25,40 +25,5 @@
DispSync::DispSync() = default;
DispSync::~DispSync() = default;
-status_t DispSync::addEventListener(const char* /*name*/, nsecs_t phase, Callback* callback,
- nsecs_t /*lastCallbackTime*/) {
- if (mCallback.callback != nullptr) {
- return BAD_VALUE;
- }
-
- mCallback = {callback, phase};
- return NO_ERROR;
-}
-status_t DispSync::removeEventListener(Callback* callback, nsecs_t* /*outLastCallback*/) {
- if (mCallback.callback != callback) {
- return BAD_VALUE;
- }
-
- mCallback = {nullptr, 0};
- return NO_ERROR;
-}
-
-status_t DispSync::changePhaseOffset(Callback* callback, nsecs_t phase) {
- if (mCallback.callback != callback) {
- return BAD_VALUE;
- }
-
- mCallback.phase = phase;
- return NO_ERROR;
-}
-
-void DispSync::triggerCallback() {
- if (mCallback.callback == nullptr) return;
-
- const std::chrono::nanoseconds now = std::chrono::steady_clock::now().time_since_epoch();
- const auto expectedVSyncTime = now + 16ms;
- mCallback.callback->onDispSyncEvent(now.count(), expectedVSyncTime.count());
-}
-
} // namespace mock
} // namespace android
diff --git a/services/surfaceflinger/tests/unittests/mock/MockDispSync.h b/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
index b39487c..41445c8 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockDispSync.h
@@ -28,11 +28,9 @@
DispSync();
~DispSync() override;
- MOCK_METHOD0(reset, void());
MOCK_METHOD1(addPresentFence, bool(const std::shared_ptr<FenceTime>&));
MOCK_METHOD0(beginResync, void());
MOCK_METHOD3(addResyncSample, bool(nsecs_t, std::optional<nsecs_t>, bool*));
- MOCK_METHOD0(endResync, void());
MOCK_METHOD1(setPeriod, void(nsecs_t));
MOCK_METHOD0(getPeriod, nsecs_t());
MOCK_METHOD0(getIntendedPeriod, nsecs_t());
@@ -42,22 +40,6 @@
MOCK_METHOD1(expectedPresentTime, nsecs_t(nsecs_t));
MOCK_CONST_METHOD1(dump, void(std::string&));
-
- status_t addEventListener(const char* name, nsecs_t phase, Callback* callback,
- nsecs_t lastCallbackTime) override;
- status_t removeEventListener(Callback* callback, nsecs_t* outLastCallback) override;
- status_t changePhaseOffset(Callback* callback, nsecs_t phase) override;
-
- nsecs_t getCallbackPhase() { return mCallback.phase; }
-
- void triggerCallback();
-
-private:
- struct CallbackType {
- Callback* callback = nullptr;
- nsecs_t phase;
- };
- CallbackType mCallback;
};
} // namespace mock
diff --git a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
index 054aaf8..eefdec1 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
@@ -35,7 +35,9 @@
MOCK_METHOD2(onHotplugReceived, void(PhysicalDisplayId, bool));
MOCK_METHOD3(onConfigChanged, void(PhysicalDisplayId, HwcConfigIndexType, nsecs_t));
MOCK_CONST_METHOD1(dump, void(std::string&));
- MOCK_METHOD1(setPhaseOffset, void(nsecs_t phaseOffset));
+ MOCK_METHOD2(setDuration,
+ void(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration));
MOCK_METHOD1(registerDisplayEventConnection,
status_t(const sp<android::EventThreadConnection> &));
MOCK_METHOD2(setVsyncRate, void(uint32_t, const sp<android::EventThreadConnection> &));