Merge "Add POLICY_FLAG_PASS_TO_USER to complete the gesture"
diff --git a/cmds/dumpstate/dumpstate.rc b/cmds/dumpstate/dumpstate.rc
index 12a7cff..a80da4e 100644
--- a/cmds/dumpstate/dumpstate.rc
+++ b/cmds/dumpstate/dumpstate.rc
@@ -8,7 +8,6 @@
socket dumpstate stream 0660 shell log
disabled
oneshot
- capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
# dumpstatez generates a zipped bugreport but also uses a socket to print the file location once
# it is finished.
@@ -17,11 +16,9 @@
class main
disabled
oneshot
- capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
# bugreportd starts dumpstate binder service and makes it wait for a listener to connect.
service bugreportd /system/bin/dumpstate -w
class main
disabled
oneshot
- capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 91bcb8d..07809e2 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -227,6 +227,13 @@
}
#endif // !VENDORSERVICEMANAGER
+ServiceManager::Service::~Service() {
+ if (hasClients) {
+ // only expected to happen on process death
+ LOG(WARNING) << "a service was removed when there are clients";
+ }
+}
+
ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
// TODO(b/151696835): reenable performance hack when we solve bug, since with
// this hack and other fixes, it is unlikely we will see even an ephemeral
@@ -293,8 +300,13 @@
}
if (out) {
- // Setting this guarantee each time we hand out a binder ensures that the client-checking
- // loop knows about the event even if the client immediately drops the service
+ // Force onClients to get sent, and then make sure the timerfd won't clear it
+ // by setting guaranteeClient again. This logic could be simplified by using
+ // a time-based guarantee. However, forcing onClients(true) to get sent
+ // right here is always going to be important for processes serving multiple
+ // lazy interfaces.
+ service->guaranteeClient = true;
+ CHECK(handleServiceClientCallback(2 /* sm + transaction */, name, false));
service->guaranteeClient = true;
}
@@ -384,8 +396,13 @@
};
if (auto it = mNameToRegistrationCallback.find(name); it != mNameToRegistrationCallback.end()) {
+ // See also getService - this handles the case where the client never gets the
+ // service; we still want the service to be able to quit.
+ mNameToService[name].guaranteeClient = true;
+ CHECK(handleServiceClientCallback(2 /* sm + transaction */, name, false));
+ mNameToService[name].guaranteeClient = true;
+
for (const sp<IServiceCallback>& cb : it->second) {
- mNameToService[name].guaranteeClient = true;
// permission checked in registerForNotifications
cb->onRegistration(name, binder);
}
@@ -706,28 +723,28 @@
void ServiceManager::handleClientCallbacks() {
for (const auto& [name, service] : mNameToService) {
- handleServiceClientCallback(name, true);
+ handleServiceClientCallback(1 /* sm has one refcount */, name, true);
}
}
-ssize_t ServiceManager::handleServiceClientCallback(const std::string& serviceName,
- bool isCalledOnInterval) {
+bool ServiceManager::handleServiceClientCallback(size_t knownClients,
+ const std::string& serviceName,
+ bool isCalledOnInterval) {
auto serviceIt = mNameToService.find(serviceName);
if (serviceIt == mNameToService.end() || mNameToClientCallback.count(serviceName) < 1) {
- return -1;
+ return true; // report that we do have clients, i.e. don't do anything
}
Service& service = serviceIt->second;
ssize_t count = service.getNodeStrongRefCount();
- // binder driver doesn't support this feature
- if (count == -1) return count;
+ // binder driver doesn't support this feature; assume we have clients
+ if (count == -1) return true;
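+ // 'knownClients' is 1 when called from the interval check (only servicemanager's own ref is
+ // expected) and 2 when called during a transaction (servicemanager plus the transaction's
+ // temporary ref).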
- bool hasClients = count > 1; // this process holds a strong count
+ bool hasKernelReportedClients = static_cast<size_t>(count) > knownClients;
if (service.guaranteeClient) {
- // we have no record of this client
- if (!service.hasClients && !hasClients) {
+ if (!service.hasClients && !hasKernelReportedClients) {
sendClientCallbackNotifications(serviceName, true,
"service is guaranteed to be in use");
}
@@ -736,21 +753,23 @@
service.guaranteeClient = false;
}
- // only send notifications if this was called via the interval checking workflow
- if (isCalledOnInterval) {
- if (hasClients && !service.hasClients) {
- // client was retrieved in some other way
- sendClientCallbackNotifications(serviceName, true, "we now have a record of a client");
- }
+ // Regardless of how this was called, we want to send the 'has clients' notification as soon
+ // as possible. This way, we have a chance of preventing further thrashing.
+ if (hasKernelReportedClients && !service.hasClients) {
+ sendClientCallbackNotifications(serviceName, true, "we now have a record of a client");
+ }
- // there are no more clients, but the callback has not been called yet
- if (!hasClients && service.hasClients) {
+ // But limit rate of shutting down service.
+ if (isCalledOnInterval) {
+ if (!hasKernelReportedClients && service.hasClients) {
sendClientCallbackNotifications(serviceName, false,
"we now have no record of a client");
}
}
- return count;
+ // May be different from 'hasKernelReportedClients'. We intentionally delay
+ // information about clients going away to reduce thrashing.
+ return service.hasClients;
}
void ServiceManager::sendClientCallbackNotifications(const std::string& serviceName,
@@ -763,13 +782,10 @@
}
Service& service = serviceIt->second;
- CHECK(hasClients != service.hasClients)
- << "Record shows: " << service.hasClients
- << " so we can't tell clients again that we have client: " << hasClients
- << " when: " << context;
+ CHECK_NE(hasClients, service.hasClients) << context;
- ALOGI("Notifying %s they %s have clients when %s", serviceName.c_str(),
- hasClients ? "do" : "don't", context);
+ ALOGI("Notifying %s they %s (previously: %s) have clients when %s", serviceName.c_str(),
+ hasClients ? "do" : "don't", service.hasClients ? "do" : "don't", context);
auto ccIt = mNameToClientCallback.find(serviceName);
CHECK(ccIt != mNameToClientCallback.end())
@@ -813,26 +829,29 @@
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
}
+ // Important: because we don't have timer-based guarantees, we don't want to clear
+ // this flag here.
if (serviceIt->second.guaranteeClient) {
ALOGI("Tried to unregister %s, but there is about to be a client.", name.c_str());
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
}
- int clients = handleServiceClientCallback(name, false);
-
- // clients < 0: feature not implemented or other error. Assume clients.
- // Otherwise:
// - kernel driver will hold onto one refcount (during this transaction)
// - servicemanager has a refcount (guaranteed by this transaction)
- // So, if clients > 2, then at least one other service on the system must hold a refcount.
- if (clients < 0 || clients > 2) {
- // client callbacks are either disabled or there are other clients
- ALOGI("Tried to unregister %s, but there are clients: %d", name.c_str(), clients);
- // Set this flag to ensure the clients are acknowledged in the next callback
+ constexpr size_t kKnownClients = 2;
+
+ if (handleServiceClientCallback(kKnownClients, name, false)) {
+ ALOGI("Tried to unregister %s, but there are clients.", name.c_str());
+
+ // Since this unregistration attempt failed, and the HIDL implementation of
+ // delaying service shutdown for multiple periods wasn't ported here, this may
+ // help reduce thrashing, but we should be able to remove it.
serviceIt->second.guaranteeClient = true;
+
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
}
+ ALOGI("Unregistering %s", name.c_str());
mNameToService.erase(name);
return Status::ok();
diff --git a/cmds/servicemanager/ServiceManager.h b/cmds/servicemanager/ServiceManager.h
index f9d4f8f..3aa6731 100644
--- a/cmds/servicemanager/ServiceManager.h
+++ b/cmds/servicemanager/ServiceManager.h
@@ -80,6 +80,8 @@
// the number of clients of the service, including servicemanager itself
ssize_t getNodeStrongRefCount();
+
+ ~Service();
};
using ServiceCallbackMap = std::map<std::string, std::vector<sp<IServiceCallback>>>;
@@ -91,7 +93,9 @@
void removeRegistrationCallback(const wp<IBinder>& who,
ServiceCallbackMap::iterator* it,
bool* found);
- ssize_t handleServiceClientCallback(const std::string& serviceName, bool isCalledOnInterval);
+ // returns whether there are known clients in addition to the count provided
+ bool handleServiceClientCallback(size_t knownClients, const std::string& serviceName,
+ bool isCalledOnInterval);
// Also updates mHasClients (of what the last callback was)
void sendClientCallbackNotifications(const std::string& serviceName, bool hasClients,
const char* context);
diff --git a/cmds/servicemanager/servicemanager.rc b/cmds/servicemanager/servicemanager.rc
index 3bd6db5..4f92b3a 100644
--- a/cmds/servicemanager/servicemanager.rc
+++ b/cmds/servicemanager/servicemanager.rc
@@ -5,7 +5,7 @@
critical
file /dev/kmsg w
onrestart setprop servicemanager.ready false
- onrestart restart apexd
+ onrestart restart --only-if-running apexd
onrestart restart audioserver
onrestart restart gatekeeperd
onrestart class_restart --only-enabled main
diff --git a/include/input/RingBuffer.h b/include/input/RingBuffer.h
new file mode 100644
index 0000000..67984b7
--- /dev/null
+++ b/include/input/RingBuffer.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <compare>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+
+namespace android {
+
+// A fixed-size ring buffer of elements.
+//
+// Elements can only be removed from the front/back or added to the front/back, but with O(1)
+// performance. Elements from the opposing side are evicted when new elements are pushed onto a full
+// buffer.
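+//
+// Example (illustrative):
+//
+//   RingBuffer<int> buffer(/*capacity=*/3);
+//   buffer.pushBack(1);
+//   buffer.pushBack(2);
+//   buffer.pushBack(3);  // buffer: {1, 2, 3}
+//   buffer.pushBack(4);  // evicts 1 from the front; buffer: {2, 3, 4}
+//   buffer.pushFront(1); // evicts 4 from the back; buffer: {1, 2, 3}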
+template <typename T>
+class RingBuffer {
+public:
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = value_type*;
+ using const_pointer = const value_type*;
+
+ template <typename U>
+ class Iterator;
+ using iterator = Iterator<T>;
+ using const_iterator = Iterator<const T>;
+
+ // Creates an empty ring buffer that can hold up to 'capacity' elements.
+ explicit RingBuffer(size_type capacity)
+ : mBuffer(std::allocator<value_type>().allocate(capacity)), mCapacity(capacity) {}
+
+ // Creates a full ring buffer holding 'count' elements initialised to 'value'.
+ explicit RingBuffer(size_type count, const_reference value) : RingBuffer(count) {
+ while (count) {
+ pushBack(value);
+ --count;
+ }
+ }
+
+ RingBuffer(const RingBuffer& other) : RingBuffer(other.capacity()) {
+ for (const auto& element : other) {
+ pushBack(element);
+ }
+ }
+
+ RingBuffer(RingBuffer&& other) noexcept { *this = std::move(other); }
+
+ ~RingBuffer() {
+ if (mBuffer) {
+ clear();
+ std::allocator<value_type>().deallocate(mBuffer, mCapacity);
+ }
+ }
+
+ RingBuffer& operator=(const RingBuffer& other) { return *this = RingBuffer(other); }
+
+ RingBuffer& operator=(RingBuffer&& other) noexcept {
+ if (this == &other) {
+ return *this;
+ }
+ if (mBuffer) {
+ clear();
+ std::allocator<value_type>().deallocate(mBuffer, mCapacity);
+ }
+ mBuffer = std::move(other.mBuffer);
+ mCapacity = other.mCapacity;
+ mBegin = other.mBegin;
+ mSize = other.mSize;
+ other.mBuffer = nullptr;
+ other.mCapacity = 0;
+ other.mBegin = 0;
+ other.mSize = 0;
+ return *this;
+ }
+
+ iterator begin() { return {*this, 0}; }
+ const_iterator begin() const { return {*this, 0}; }
+ iterator end() { return {*this, mSize}; }
+ const_iterator end() const { return {*this, mSize}; }
+
+ reference operator[](size_type i) { return mBuffer[bufferIndex(i)]; }
+ const_reference operator[](size_type i) const { return mBuffer[bufferIndex(i)]; }
+
+ // Removes all elements from the buffer.
+ void clear() {
+ std::destroy(begin(), end());
+ mSize = 0;
+ }
+
+ // Removes and returns the first element from the buffer.
+ value_type popFront() {
+ value_type element = mBuffer[mBegin];
+ std::destroy_at(std::addressof(mBuffer[mBegin]));
+ mBegin = next(mBegin);
+ --mSize;
+ return element;
+ }
+
+ // Removes and returns the last element from the buffer.
+ value_type popBack() {
+ size_type backIndex = bufferIndex(mSize - 1);
+ value_type element = mBuffer[backIndex];
+ std::destroy_at(std::addressof(mBuffer[backIndex]));
+ --mSize;
+ return element;
+ }
+
+ // Adds an element to the front of the buffer.
+ void pushFront(const value_type& element) { pushFront(value_type(element)); }
+ void pushFront(value_type&& element) {
+ mBegin = previous(mBegin);
+ if (size() == capacity()) {
+ mBuffer[mBegin] = std::forward<value_type>(element);
+ } else {
+ // The space at mBuffer[mBegin] is uninitialised.
+ // TODO: Use std::construct_at when it becomes available.
+ new (std::addressof(mBuffer[mBegin])) value_type(std::forward<value_type>(element));
+ ++mSize;
+ }
+ }
+
+ // Adds an element to the back of the buffer.
+ void pushBack(const value_type& element) { pushBack(value_type(element)); }
+ void pushBack(value_type&& element) {
+ if (size() == capacity()) {
+ mBuffer[mBegin] = std::forward<value_type>(element);
+ mBegin = next(mBegin);
+ } else {
+ // The space at mBuffer[bufferIndex(mSize)] is uninitialised.
+ // TODO: Use std::construct_at when it becomes available.
+ new (std::addressof(mBuffer[bufferIndex(mSize)]))
+ value_type(std::forward<value_type>(element));
+ ++mSize;
+ }
+ }
+
+ bool empty() const { return mSize == 0; }
+ size_type capacity() const { return mCapacity; }
+ size_type size() const { return mSize; }
+
+ void swap(RingBuffer& other) noexcept {
+ using std::swap;
+ swap(mBuffer, other.mBuffer);
+ swap(mCapacity, other.mCapacity);
+ swap(mBegin, other.mBegin);
+ swap(mSize, other.mSize);
+ }
+
+ friend void swap(RingBuffer& lhs, RingBuffer& rhs) noexcept { lhs.swap(rhs); }
+
+ template <typename U>
+ class Iterator {
+ private:
+ using ContainerType = std::conditional_t<std::is_const_v<U>, const RingBuffer, RingBuffer>;
+
+ public:
+ using iterator_category = std::random_access_iterator_tag;
+ using size_type = ContainerType::size_type;
+ using difference_type = ContainerType::difference_type;
+ using value_type = std::remove_cv_t<U>;
+ using pointer = U*;
+ using reference = U&;
+
+ Iterator(ContainerType& container, size_type index)
+ : mContainer(container), mIndex(index) {}
+
+ Iterator(const Iterator&) = default;
+ Iterator& operator=(const Iterator&) = default;
+
+ Iterator& operator++() {
+ ++mIndex;
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ Iterator iterator(*this);
+ ++(*this);
+ return iterator;
+ }
+
+ Iterator& operator--() {
+ --mIndex;
+ return *this;
+ }
+
+ Iterator operator--(int) {
+ Iterator iterator(*this);
+ --(*this);
+ return iterator;
+ }
+
+ Iterator& operator+=(difference_type n) {
+ mIndex += n;
+ return *this;
+ }
+
+ Iterator operator+(difference_type n) {
+ Iterator iterator(*this);
+ return iterator += n;
+ }
+
+ Iterator& operator-=(difference_type n) { return *this += -n; }
+
+ Iterator operator-(difference_type n) {
+ Iterator iterator(*this);
+ return iterator -= n;
+ }
+
+ difference_type operator-(const Iterator& other) { return mIndex - other.mIndex; }
+
+ bool operator==(const Iterator& rhs) const { return mIndex == rhs.mIndex; }
+
+ bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
+
+ friend auto operator<=>(const Iterator& lhs, const Iterator& rhs) {
+ return lhs.mIndex <=> rhs.mIndex;
+ }
+
+ reference operator[](difference_type n) { return *(*this + n); }
+
+ reference operator*() const { return mContainer[mIndex]; }
+ pointer operator->() const { return std::addressof(mContainer[mIndex]); }
+
+ private:
+ ContainerType& mContainer;
+ size_type mIndex = 0;
+ };
+
+private:
+ // Returns the index of the next element in mBuffer.
+ size_type next(size_type index) const {
+ if (index == capacity() - 1) {
+ return 0;
+ } else {
+ return index + 1;
+ }
+ }
+
+ // Returns the index of the previous element in mBuffer.
+ size_type previous(size_type index) const {
+ if (index == 0) {
+ return capacity() - 1;
+ } else {
+ return index - 1;
+ }
+ }
+
+ // Converts the index of an element in [0, size()] to its corresponding index in mBuffer.
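+ // For example, with capacity() == 4, mBegin == 3 and elementIndex == 2, the raw index 5 wraps
+ // around to 1.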
+ size_type bufferIndex(size_type elementIndex) const {
+ CHECK_LE(elementIndex, size());
+ size_type index = mBegin + elementIndex;
+ if (index >= capacity()) {
+ index -= capacity();
+ }
+ CHECK_LT(index, capacity())
+ << android::base::StringPrintf("Invalid index calculated for element (%zu) "
+ "in buffer of size %zu",
+ elementIndex, size());
+ return index;
+ }
+
+ pointer mBuffer = nullptr;
+ size_type mCapacity = 0; // Total capacity of mBuffer.
+ size_type mBegin = 0; // Index of the first initialised element in mBuffer.
+ size_type mSize = 0; // Total number of initialised elements.
+};
+
+} // namespace android
diff --git a/include/input/TfLiteMotionPredictor.h b/include/input/TfLiteMotionPredictor.h
index ff0f51c..704349c 100644
--- a/include/input/TfLiteMotionPredictor.h
+++ b/include/input/TfLiteMotionPredictor.h
@@ -23,7 +23,8 @@
#include <optional>
#include <span>
#include <string>
-#include <vector>
+
+#include <input/RingBuffer.h>
#include <tensorflow/lite/core/api/error_reporter.h>
#include <tensorflow/lite/interpreter.h>
@@ -83,11 +84,11 @@
private:
int64_t mTimestamp = 0;
- std::vector<float> mInputR;
- std::vector<float> mInputPhi;
- std::vector<float> mInputPressure;
- std::vector<float> mInputTilt;
- std::vector<float> mInputOrientation;
+ RingBuffer<float> mInputR;
+ RingBuffer<float> mInputPhi;
+ RingBuffer<float> mInputPressure;
+ RingBuffer<float> mInputTilt;
+ RingBuffer<float> mInputOrientation;
// The samples defining the current polar axis.
std::optional<TfLiteMotionPredictorSample> mAxisFrom;
diff --git a/include/private/performance_hint_private.h b/include/private/performance_hint_private.h
index d50c5f8..eaf3b5e 100644
--- a/include/private/performance_hint_private.h
+++ b/include/private/performance_hint_private.h
@@ -17,8 +17,6 @@
#ifndef ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
#define ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
-#include <stdint.h>
-
__BEGIN_DECLS
/**
@@ -29,7 +27,7 @@
/**
* Hints for the session used to signal upcoming changes in the mode or workload.
*/
-enum SessionHint: int32_t {
+enum SessionHint {
/**
* This hint indicates a sudden increase in CPU workload intensity. It means
* that this hint session needs extra CPU resources immediately to meet the
@@ -63,7 +61,7 @@
* @return 0 on success
* EPIPE if communication with the system service has failed.
*/
-int APerformanceHint_sendHint(void* session, SessionHint hint);
+int APerformanceHint_sendHint(void* session, int hint);
/**
* Return the list of thread ids, this API should only be used for testing only.
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index d03326e..53852d8 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -388,7 +388,8 @@
{
if (isRpcBinder()) {
if (rpcSession()->getMaxIncomingThreads() < 1) {
- ALOGE("Cannot register a DeathRecipient without any incoming connections.");
+ ALOGE("Cannot register a DeathRecipient without any incoming threads. Need to set max "
+ "incoming threads to a value greater than 0 before calling linkToDeath.");
return INVALID_OPERATION;
}
} else if constexpr (!kEnableKernelIpc) {
diff --git a/libs/binder/TEST_MAPPING b/libs/binder/TEST_MAPPING
index 2615876..1488400 100644
--- a/libs/binder/TEST_MAPPING
+++ b/libs/binder/TEST_MAPPING
@@ -89,6 +89,9 @@
"name": "CtsRootRollbackManagerHostTestCases"
},
{
+ "name": "StagedRollbackTest"
+ },
+ {
"name": "binderRpcTestNoKernel"
},
{
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index 6c9c28a..21900a0 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -254,6 +254,10 @@
lto: {
thin: true,
},
+
+ cflags: [
+ "-Wthread-safety",
+ ],
}
// Used by media codec services exclusively as a static lib for
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 5d12463..66c0041 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -35,6 +35,7 @@
#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>
+#include <android-base/thread_annotations.h>
#include <chrono>
using namespace std::chrono_literals;
@@ -63,6 +64,10 @@
ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
mNumAcquired, ##__VA_ARGS__)
+#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
+ std::unique_lock _lock{mutex}; \
+ base::ScopedLockAssertion assumeLocked(mutex);
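+// std::unique_lock is used (rather than std::lock_guard) where the lock is later handed to a
+// condition variable wait, and the ScopedLockAssertion tells clang's -Wthread-safety analysis
+// that the mutex is held for the rest of the scope.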
+
void BLASTBufferItemConsumer::onDisconnect() {
Mutex::Autolock lock(mMutex);
mPreviouslyConnected = mCurrentlyConnected;
@@ -207,7 +212,7 @@
int32_t format) {
LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
if (mFormat != format) {
mFormat = format;
mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
@@ -277,7 +282,7 @@
const sp<Fence>& /*presentFence*/,
const std::vector<SurfaceControlStats>& stats) {
{
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
BBQ_TRACE();
BQA_LOGV("transactionCommittedCallback");
if (!mSurfaceControlsWithPendingCallback.empty()) {
@@ -325,7 +330,7 @@
void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
const std::vector<SurfaceControlStats>& stats) {
{
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
BBQ_TRACE();
BQA_LOGV("transactionCallback");
@@ -406,9 +411,8 @@
void BLASTBufferQueue::releaseBufferCallback(
const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
std::optional<uint32_t> currentMaxAcquiredBufferCount) {
+ std::lock_guard _lock{mMutex};
BBQ_TRACE();
-
- std::unique_lock _lock{mMutex};
releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
false /* fakeRelease */);
}
@@ -423,10 +427,8 @@
// to the buffer queue. This will prevent higher latency when we are running
// on a lower refresh rate than the max supported. We only do that for EGL
// clients as others don't care about latency
- const bool isEGL = [&] {
- const auto it = mSubmitted.find(id);
- return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
- }();
+ const auto it = mSubmitted.find(id);
+ const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
if (currentMaxAcquiredBufferCount) {
mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
@@ -485,11 +487,19 @@
status_t BLASTBufferQueue::acquireNextBufferLocked(
const std::optional<SurfaceComposerClient::Transaction*> transaction) {
- // If the next transaction is set, we want to guarantee the our acquire will not fail, so don't
- // include the extra buffer when checking if we can acquire the next buffer.
+ // Check if we have frames available and we have not acquired the maximum number of buffers.
+ // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
+ // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
+ // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
if (mNumFrameAvailable == 0) {
- BQA_LOGV("Can't process next buffer. No available frames");
- return NOT_ENOUGH_DATA;
+ BQA_LOGV("Can't acquire next buffer. No available frames");
+ return BufferQueue::NO_BUFFER_AVAILABLE;
+ }
+
+ if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
+ BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
+ mNumAcquired, mMaxAcquiredBuffers);
+ return BufferQueue::NO_BUFFER_AVAILABLE;
}
if (mSurfaceControl == nullptr) {
@@ -607,7 +617,7 @@
}
{
- std::unique_lock _lock{mTimestampMutex};
+ std::lock_guard _lock{mTimestampMutex};
auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
if (dequeueTime != mDequeueTimestamps.end()) {
Parcel p;
@@ -662,11 +672,11 @@
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
SurfaceComposerClient::Transaction* prevTransaction = nullptr;
- bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();
{
- std::unique_lock _lock{mMutex};
+ UNIQUE_LOCK_WITH_ASSERTION(mMutex);
BBQ_TRACE();
+ bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();
const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));
@@ -696,6 +706,15 @@
// flush out the shadow queue
acquireAndReleaseBuffer();
}
+ } else {
+ // Make sure the frame available count is 0 before proceeding with a sync to ensure
+ // the correct frame is used for the sync. The only way mNumFrameAvailable would be
+ // greater than 0 is if we already ran out of buffers previously. This means we
+ // need to flush the buffers before proceeding with the sync.
+ while (mNumFrameAvailable > 0) {
+ BQA_LOGD("waiting until no queued buffers");
+ mCallbackCV.wait(_lock);
+ }
}
}
@@ -711,6 +730,11 @@
item.mFrameNumber, boolToString(syncTransactionSet));
if (syncTransactionSet) {
+ // Add to mSyncedFrameNumbers before waiting in case any buffers are released
+ // while waiting for a free buffer. The release and commit callback will try to
+ // acquire buffers if there are any available, but we don't want it to acquire
+ // in the case where a sync transaction wants the buffer.
+ mSyncedFrameNumbers.emplace(item.mFrameNumber);
// If there's no available buffer and we're in a sync transaction, we need to wait
// instead of returning since we guarantee a buffer will be acquired for the sync.
while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
@@ -723,7 +747,6 @@
incStrong((void*)transactionCommittedCallbackThunk);
mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
static_cast<void*>(this));
- mSyncedFrameNumbers.emplace(item.mFrameNumber);
if (mAcquireSingleBuffer) {
prevCallback = mTransactionReadyCallback;
prevTransaction = mSyncTransaction;
@@ -745,25 +768,24 @@
}
void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
- std::unique_lock _lock{mTimestampMutex};
+ std::lock_guard _lock{mTimestampMutex};
mDequeueTimestamps[bufferId] = systemTime();
};
void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
- std::unique_lock _lock{mTimestampMutex};
+ std::lock_guard _lock{mTimestampMutex};
mDequeueTimestamps.erase(bufferId);
};
void BLASTBufferQueue::syncNextTransaction(
std::function<void(SurfaceComposerClient::Transaction*)> callback,
bool acquireSingleBuffer) {
- BBQ_TRACE();
-
std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
SurfaceComposerClient::Transaction* prevTransaction = nullptr;
{
std::lock_guard _lock{mMutex};
+ BBQ_TRACE();
// We're about to overwrite the previous call so we should invoke that callback
// immediately.
if (mTransactionReadyCallback) {
@@ -829,8 +851,8 @@
class BBQSurface : public Surface {
private:
std::mutex mMutex;
- sp<BLASTBufferQueue> mBbq;
- bool mDestroyed = false;
+ sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
+ bool mDestroyed GUARDED_BY(mMutex) = false;
public:
BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
@@ -851,7 +873,7 @@
status_t setFrameRate(float frameRate, int8_t compatibility,
int8_t changeFrameRateStrategy) override {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
if (mDestroyed) {
return DEAD_OBJECT;
}
@@ -864,7 +886,7 @@
status_t setFrameTimelineInfo(uint64_t frameNumber,
const FrameTimelineInfo& frameTimelineInfo) override {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
if (mDestroyed) {
return DEAD_OBJECT;
}
@@ -874,7 +896,7 @@
void destroy() override {
Surface::destroy();
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
mDestroyed = true;
mBbq = nullptr;
}
@@ -884,7 +906,7 @@
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
bool shouldBeSeamless) {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
SurfaceComposerClient::Transaction t;
return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
@@ -894,20 +916,20 @@
const FrameTimelineInfo& frameTimelineInfo) {
ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
frameNumber, frameTimelineInfo.vsyncId);
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
return OK;
}
void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
SurfaceComposerClient::Transaction t;
t.setSidebandStream(mSurfaceControl, stream).apply();
}
sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
sp<IBinder> scHandle = nullptr;
if (includeSurfaceControlHandle && mSurfaceControl) {
scHandle = mSurfaceControl->getHandle();
@@ -1098,6 +1120,7 @@
}
uint32_t BLASTBufferQueue::getLastTransformHint() const {
+ std::lock_guard _lock{mMutex};
if (mSurfaceControl != nullptr) {
return mSurfaceControl->getTransformHint();
} else {
@@ -1106,18 +1129,18 @@
}
uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
return mLastAcquiredFrameNumber;
}
bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}
void BLASTBufferQueue::setTransactionHangCallback(
std::function<void(const std::string&)> callback) {
- std::unique_lock _lock{mMutex};
+ std::lock_guard _lock{mMutex};
mTransactionHangCallback = callback;
}
diff --git a/libs/gui/ITransactionCompletedListener.cpp b/libs/gui/ITransactionCompletedListener.cpp
index 985c549..ffe79a3 100644
--- a/libs/gui/ITransactionCompletedListener.cpp
+++ b/libs/gui/ITransactionCompletedListener.cpp
@@ -39,6 +39,12 @@
} // Anonymous namespace
+namespace { // Anonymous
+
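+// Keep in sync with the value reserved for serialization in CallbackId::Type
+// (ITransactionCompletedListener.h).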
+constexpr int32_t kSerializedCallbackTypeOnCompleteWithJankData = 2;
+
+} // Anonymous namespace
+
status_t FrameEventHistoryStats::writeToParcel(Parcel* output) const {
status_t err = output->writeUint64(frameNumber);
if (err != NO_ERROR) return err;
@@ -349,7 +355,11 @@
status_t CallbackId::writeToParcel(Parcel* output) const {
SAFE_PARCEL(output->writeInt64, id);
- SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+ if (type == Type::ON_COMPLETE && includeJankData) {
+ SAFE_PARCEL(output->writeInt32, kSerializedCallbackTypeOnCompleteWithJankData);
+ } else {
+ SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+ }
return NO_ERROR;
}
@@ -357,7 +367,13 @@
SAFE_PARCEL(input->readInt64, &id);
int32_t typeAsInt;
SAFE_PARCEL(input->readInt32, &typeAsInt);
- type = static_cast<CallbackId::Type>(typeAsInt);
+ if (typeAsInt == kSerializedCallbackTypeOnCompleteWithJankData) {
+ type = Type::ON_COMPLETE;
+ includeJankData = true;
+ } else {
+ type = static_cast<CallbackId::Type>(typeAsInt);
+ includeJankData = false;
+ }
return NO_ERROR;
}
diff --git a/libs/gui/LayerState.cpp b/libs/gui/LayerState.cpp
index 8372363..7772a65 100644
--- a/libs/gui/LayerState.cpp
+++ b/libs/gui/LayerState.cpp
@@ -684,6 +684,9 @@
what |= eDimmingEnabledChanged;
dimmingEnabled = other.dimmingEnabled;
}
+ if (other.what & eFlushJankData) {
+ what |= eFlushJankData;
+ }
if ((other.what & what) != other.what) {
ALOGE("Unmerged SurfaceComposer Transaction properties. LayerState::merge needs updating? "
"other.what=0x%" PRIX64 " what=0x%" PRIX64 " unmerged flags=0x%" PRIX64,
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index 9092f5f..a345938 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -53,6 +53,7 @@
#include <ui/DisplayState.h>
#include <ui/DynamicDisplayInfo.h>
+#include <android-base/thread_annotations.h>
#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>
@@ -81,6 +82,8 @@
int64_t generateId() {
return (((int64_t)getpid()) << 32) | ++idCounter;
}
+
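+// No-op completion callback, used below to request jank data when no user-supplied callback
+// asks for it.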
+void emptyCallback(nsecs_t, const sp<Fence>&, const std::vector<SurfaceControlStats>&) {}
} // namespace
ComposerService::ComposerService()
@@ -248,6 +251,14 @@
surfaceControls,
CallbackId::Type callbackType) {
std::lock_guard<std::mutex> lock(mMutex);
+ return addCallbackFunctionLocked(callbackFunction, surfaceControls, callbackType);
+}
+
+CallbackId TransactionCompletedListener::addCallbackFunctionLocked(
+ const TransactionCompletedCallback& callbackFunction,
+ const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+ surfaceControls,
+ CallbackId::Type callbackType) {
startListeningLocked();
CallbackId callbackId(getNextIdLocked(), callbackType);
@@ -256,6 +267,11 @@
for (const auto& surfaceControl : surfaceControls) {
callbackSurfaceControls[surfaceControl->getHandle()] = surfaceControl;
+
+ if (callbackType == CallbackId::Type::ON_COMPLETE &&
+ mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+ callbackId.includeJankData = true;
+ }
}
return callbackId;
@@ -304,15 +320,26 @@
}
void TransactionCompletedListener::addSurfaceControlToCallbacks(
- const sp<SurfaceControl>& surfaceControl,
- const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds) {
+ SurfaceComposerClient::CallbackInfo& callbackInfo,
+ const sp<SurfaceControl>& surfaceControl) {
std::lock_guard<std::mutex> lock(mMutex);
- for (auto callbackId : callbackIds) {
+ bool includingJankData = false;
+ for (auto callbackId : callbackInfo.callbackIds) {
mCallbacks[callbackId].surfaceControls.emplace(std::piecewise_construct,
std::forward_as_tuple(
surfaceControl->getHandle()),
std::forward_as_tuple(surfaceControl));
+ includingJankData = includingJankData || callbackId.includeJankData;
+ }
+
+ // If no registered callback is requesting jank data, but there is a jank listener registered
+ // on the new surface control, add a synthetic callback that requests the jank data.
+ if (!includingJankData && mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+ CallbackId callbackId =
+ addCallbackFunctionLocked(&emptyCallback, callbackInfo.surfaceControls,
+ CallbackId::Type::ON_COMPLETE);
+ callbackInfo.callbackIds.emplace(callbackId);
}
}
@@ -929,8 +956,7 @@
// register all surface controls for all callbackIds for this listener that is merging
for (const auto& surfaceControl : currentProcessCallbackInfo.surfaceControls) {
TransactionCompletedListener::getInstance()
- ->addSurfaceControlToCallbacks(surfaceControl,
- currentProcessCallbackInfo.callbackIds);
+ ->addSurfaceControlToCallbacks(currentProcessCallbackInfo, surfaceControl);
}
}
@@ -1180,6 +1206,19 @@
void SurfaceComposerClient::Transaction::setDefaultApplyToken(sp<IBinder> applyToken) {
sApplyToken = applyToken;
}
+
+status_t SurfaceComposerClient::Transaction::sendSurfaceFlushJankDataTransaction(
+ const sp<SurfaceControl>& sc) {
+ Transaction t;
+ layer_state_t* s = t.getLayerState(sc);
+ if (!s) {
+ return BAD_INDEX;
+ }
+
+ s->what |= layer_state_t::eFlushJankData;
+ t.registerSurfaceControlForCallback(sc);
+ return t.apply(/*sync=*/false, /* oneWay=*/true);
+}
// ---------------------------------------------------------------------------
sp<IBinder> SurfaceComposerClient::createDisplay(const String8& displayName, bool secure,
@@ -1253,8 +1292,7 @@
auto& callbackInfo = mListenerCallbacks[TransactionCompletedListener::getIInstance()];
callbackInfo.surfaceControls.insert(sc);
- TransactionCompletedListener::getInstance()
- ->addSurfaceControlToCallbacks(sc, callbackInfo.callbackIds);
+ TransactionCompletedListener::getInstance()->addSurfaceControlToCallbacks(callbackInfo, sc);
}
SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setPosition(
@@ -2988,6 +3026,7 @@
while (true) {
{
std::unique_lock<std::mutex> lock(mMutex);
+ base::ScopedLockAssertion assumeLocked(mMutex);
callbackInfos = std::move(mCallbackInfos);
mCallbackInfos = {};
}
@@ -3000,6 +3039,7 @@
{
std::unique_lock<std::mutex> lock(mMutex);
+ base::ScopedLockAssertion assumeLocked(mMutex);
if (mCallbackInfos.size() == 0) {
mReleaseCallbackPending.wait(lock);
}
diff --git a/libs/gui/include/gui/BLASTBufferQueue.h b/libs/gui/include/gui/BLASTBufferQueue.h
index c93ab86..8d07162 100644
--- a/libs/gui/include/gui/BLASTBufferQueue.h
+++ b/libs/gui/include/gui/BLASTBufferQueue.h
@@ -44,23 +44,23 @@
mCurrentlyConnected(false),
mPreviouslyConnected(false) {}
- void onDisconnect() override;
+ void onDisconnect() override EXCLUDES(mMutex);
void addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
- FrameEventHistoryDelta* outDelta) override REQUIRES(mMutex);
+ FrameEventHistoryDelta* outDelta) override EXCLUDES(mMutex);
void updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
const sp<Fence>& gpuCompositionDoneFence,
const sp<Fence>& presentFence, const sp<Fence>& prevReleaseFence,
CompositorTiming compositorTiming, nsecs_t latchTime,
- nsecs_t dequeueReadyTime) REQUIRES(mMutex);
- void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect);
+ nsecs_t dequeueReadyTime) EXCLUDES(mMutex);
+ void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) EXCLUDES(mMutex);
protected:
- void onSidebandStreamChanged() override REQUIRES(mMutex);
+ void onSidebandStreamChanged() override EXCLUDES(mMutex);
private:
const wp<BLASTBufferQueue> mBLASTBufferQueue;
- uint64_t mCurrentFrameNumber = 0;
+ uint64_t mCurrentFrameNumber GUARDED_BY(mMutex) = 0;
Mutex mMutex;
ConsumerFrameEventHistory mFrameEventHistory GUARDED_BY(mMutex);
@@ -94,7 +94,7 @@
std::optional<uint32_t> currentMaxAcquiredBufferCount);
void releaseBufferCallbackLocked(const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
std::optional<uint32_t> currentMaxAcquiredBufferCount,
- bool fakeRelease);
+ bool fakeRelease) REQUIRES(mMutex);
void syncNextTransaction(std::function<void(SurfaceComposerClient::Transaction*)> callback,
bool acquireSingleBuffer = true);
void stopContinuousSyncTransaction();
@@ -150,7 +150,7 @@
// mNumAcquired (buffers that queued to SF) mPendingRelease.size() (buffers that are held by
// blast). This counter is read by android studio profiler.
std::string mQueuedBufferTrace;
- sp<SurfaceControl> mSurfaceControl;
+ sp<SurfaceControl> mSurfaceControl GUARDED_BY(mMutex);
mutable std::mutex mMutex;
std::condition_variable mCallbackCV;
@@ -252,7 +252,7 @@
// callback for them.
std::queue<sp<SurfaceControl>> mSurfaceControlsWithPendingCallback GUARDED_BY(mMutex);
- uint32_t mCurrentMaxAcquiredBufferCount;
+ uint32_t mCurrentMaxAcquiredBufferCount GUARDED_BY(mMutex);
// Flag to determine if syncTransaction should only acquire a single buffer and then clear or
// continue to acquire buffers until explicitly cleared
@@ -276,8 +276,8 @@
// need to set this flag, notably only in the case where we are transitioning from a previous
// transaction applied by us (one way, may not yet have reached server) and an upcoming
// transaction that will be applied by some sync consumer.
- bool mAppliedLastTransaction = false;
- uint64_t mLastAppliedFrameNumber = 0;
+ bool mAppliedLastTransaction GUARDED_BY(mMutex) = false;
+ uint64_t mLastAppliedFrameNumber GUARDED_BY(mMutex) = 0;
std::function<void(const std::string&)> mTransactionHangCallback;
diff --git a/libs/gui/include/gui/ITransactionCompletedListener.h b/libs/gui/include/gui/ITransactionCompletedListener.h
index d593f56..39bcb4a 100644
--- a/libs/gui/include/gui/ITransactionCompletedListener.h
+++ b/libs/gui/include/gui/ITransactionCompletedListener.h
@@ -40,10 +40,15 @@
class CallbackId : public Parcelable {
public:
int64_t id;
- enum class Type : int32_t { ON_COMPLETE, ON_COMMIT } type;
+ enum class Type : int32_t {
+ ON_COMPLETE = 0,
+ ON_COMMIT = 1,
+ /*reserved for serialization = 2*/
+ } type;
+ bool includeJankData; // Only respected for ON_COMPLETE callbacks.
CallbackId() {}
- CallbackId(int64_t id, Type type) : id(id), type(type) {}
+ CallbackId(int64_t id, Type type) : id(id), type(type), includeJankData(false) {}
status_t writeToParcel(Parcel* output) const override;
status_t readFromParcel(const Parcel* input) override;
diff --git a/libs/gui/include/gui/LayerState.h b/libs/gui/include/gui/LayerState.h
index b8bee72..03a2582 100644
--- a/libs/gui/include/gui/LayerState.h
+++ b/libs/gui/include/gui/LayerState.h
@@ -170,7 +170,7 @@
eTransparentRegionChanged = 0x00000020,
eFlagsChanged = 0x00000040,
eLayerStackChanged = 0x00000080,
- /* unused = 0x00000100, */
+ eFlushJankData = 0x00000100,
/* unused = 0x00000200, */
eDimmingEnabledChanged = 0x00000400,
eShadowRadiusChanged = 0x00000800,
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index 45f4dbe..809ea5a 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -742,6 +742,8 @@
static sp<IBinder> getDefaultApplyToken();
static void setDefaultApplyToken(sp<IBinder> applyToken);
+
+ static status_t sendSurfaceFlushJankDataTransaction(const sp<SurfaceControl>& sc);
};
status_t clearLayerFrameStats(const sp<IBinder>& token) const;
@@ -876,10 +878,14 @@
const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
surfaceControls,
CallbackId::Type callbackType);
+ CallbackId addCallbackFunctionLocked(
+ const TransactionCompletedCallback& callbackFunction,
+ const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+ surfaceControls,
+ CallbackId::Type callbackType) REQUIRES(mMutex);
- void addSurfaceControlToCallbacks(
- const sp<SurfaceControl>& surfaceControl,
- const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds);
+ void addSurfaceControlToCallbacks(SurfaceComposerClient::CallbackInfo& callbackInfo,
+ const sp<SurfaceControl>& surfaceControl);
void addQueueStallListener(std::function<void(const std::string&)> stallListener, void* id);
void removeQueueStallListener(void *id);
@@ -921,7 +927,7 @@
void onTrustedPresentationChanged(int id, bool presentedWithinThresholds) override;
private:
- ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&);
+ ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&) REQUIRES(mMutex);
static sp<TransactionCompletedListener> sInstance;
};
diff --git a/libs/input/TfLiteMotionPredictor.cpp b/libs/input/TfLiteMotionPredictor.cpp
index 1a337ad..40653d3 100644
--- a/libs/input/TfLiteMotionPredictor.cpp
+++ b/libs/input/TfLiteMotionPredictor.cpp
@@ -104,13 +104,13 @@
} // namespace
-TfLiteMotionPredictorBuffers::TfLiteMotionPredictorBuffers(size_t inputLength) {
+TfLiteMotionPredictorBuffers::TfLiteMotionPredictorBuffers(size_t inputLength)
+ : mInputR(inputLength, 0),
+ mInputPhi(inputLength, 0),
+ mInputPressure(inputLength, 0),
+ mInputTilt(inputLength, 0),
+ mInputOrientation(inputLength, 0) {
LOG_ALWAYS_FATAL_IF(inputLength == 0, "Buffer input size must be greater than 0");
- mInputR.resize(inputLength);
- mInputPhi.resize(inputLength);
- mInputPressure.resize(inputLength);
- mInputTilt.resize(inputLength);
- mInputOrientation.resize(inputLength);
}
void TfLiteMotionPredictorBuffers::reset() {
@@ -186,17 +186,11 @@
mAxisTo = sample;
// Push the current sample onto the end of the input buffers.
- mInputR.erase(mInputR.begin());
- mInputPhi.erase(mInputPhi.begin());
- mInputPressure.erase(mInputPressure.begin());
- mInputTilt.erase(mInputTilt.begin());
- mInputOrientation.erase(mInputOrientation.begin());
-
- mInputR.push_back(r);
- mInputPhi.push_back(phi);
- mInputPressure.push_back(sample.pressure);
- mInputTilt.push_back(sample.tilt);
- mInputOrientation.push_back(orientation);
+ mInputR.pushBack(r);
+ mInputPhi.pushBack(phi);
+ mInputPressure.pushBack(sample.pressure);
+ mInputTilt.pushBack(sample.tilt);
+ mInputOrientation.pushBack(orientation);
}
std::unique_ptr<TfLiteMotionPredictorModel> TfLiteMotionPredictorModel::create(
diff --git a/libs/input/tests/Android.bp b/libs/input/tests/Android.bp
index 916a8f2..f07164c 100644
--- a/libs/input/tests/Android.bp
+++ b/libs/input/tests/Android.bp
@@ -19,6 +19,7 @@
"InputEvent_test.cpp",
"InputPublisherAndConsumer_test.cpp",
"MotionPredictor_test.cpp",
+ "RingBuffer_test.cpp",
"TfLiteMotionPredictor_test.cpp",
"TouchResampling_test.cpp",
"TouchVideoFrame_test.cpp",
diff --git a/libs/input/tests/RingBuffer_test.cpp b/libs/input/tests/RingBuffer_test.cpp
new file mode 100644
index 0000000..8a6ef4c
--- /dev/null
+++ b/libs/input/tests/RingBuffer_test.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <input/RingBuffer.h>
+
+namespace android {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Not;
+using ::testing::SizeIs;
+
+TEST(RingBufferTest, PushPop) {
+ RingBuffer<int> buffer(/*capacity=*/3);
+
+ buffer.pushBack(1);
+ buffer.pushBack(2);
+ buffer.pushBack(3);
+ EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+ buffer.pushBack(4);
+ EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+ buffer.pushFront(1);
+ EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+ EXPECT_EQ(1, buffer.popFront());
+ EXPECT_THAT(buffer, ElementsAre(2, 3));
+
+ buffer.pushBack(4);
+ EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+ buffer.pushBack(5);
+ EXPECT_THAT(buffer, ElementsAre(3, 4, 5));
+
+ EXPECT_EQ(5, buffer.popBack());
+ EXPECT_THAT(buffer, ElementsAre(3, 4));
+
+ EXPECT_EQ(4, buffer.popBack());
+ EXPECT_THAT(buffer, ElementsAre(3));
+
+ EXPECT_EQ(3, buffer.popBack());
+ EXPECT_THAT(buffer, ElementsAre());
+
+ buffer.pushBack(1);
+ EXPECT_THAT(buffer, ElementsAre(1));
+
+ EXPECT_EQ(1, buffer.popFront());
+ EXPECT_THAT(buffer, ElementsAre());
+}
+
+TEST(RingBufferTest, ObjectType) {
+ RingBuffer<std::unique_ptr<int>> buffer(/*capacity=*/2);
+ buffer.pushBack(std::make_unique<int>(1));
+ buffer.pushBack(std::make_unique<int>(2));
+ buffer.pushBack(std::make_unique<int>(3));
+
+ EXPECT_EQ(2, *buffer[0]);
+ EXPECT_EQ(3, *buffer[1]);
+}
+
+TEST(RingBufferTest, ConstructConstantValue) {
+ RingBuffer<int> buffer(/*count=*/3, /*value=*/10);
+ EXPECT_THAT(buffer, ElementsAre(10, 10, 10));
+ EXPECT_EQ(3u, buffer.capacity());
+}
+
+TEST(RingBufferTest, Assignment) {
+ RingBuffer<int> a(/*capacity=*/2);
+ a.pushBack(1);
+ a.pushBack(2);
+
+ RingBuffer<int> b(/*capacity=*/3);
+ b.pushBack(10);
+ b.pushBack(20);
+ b.pushBack(30);
+
+ std::swap(a, b);
+ EXPECT_THAT(a, ElementsAre(10, 20, 30));
+ EXPECT_THAT(b, ElementsAre(1, 2));
+
+ a = b;
+ EXPECT_THAT(a, ElementsAreArray(b));
+
+ RingBuffer<int> c(b);
+ EXPECT_THAT(c, ElementsAreArray(b));
+
+ RingBuffer<int> d(std::move(b));
+ EXPECT_EQ(0u, b.capacity());
+ EXPECT_THAT(b, ElementsAre());
+ EXPECT_THAT(d, ElementsAre(1, 2));
+
+ b = std::move(d);
+ EXPECT_THAT(b, ElementsAre(1, 2));
+ EXPECT_THAT(d, ElementsAre());
+ EXPECT_EQ(0u, d.capacity());
+}
+
+TEST(RingBufferTest, Subscripting) {
+ RingBuffer<int> buffer(/*capacity=*/2);
+ buffer.pushBack(1);
+ EXPECT_EQ(1, buffer[0]);
+
+ buffer.pushFront(0);
+ EXPECT_EQ(0, buffer[0]);
+ EXPECT_EQ(1, buffer[1]);
+
+ buffer.pushFront(-1);
+ EXPECT_EQ(-1, buffer[0]);
+ EXPECT_EQ(0, buffer[1]);
+}
+
+TEST(RingBufferTest, Iterator) {
+ RingBuffer<int> buffer(/*capacity=*/3);
+ buffer.pushFront(2);
+ buffer.pushBack(3);
+
+ auto begin = buffer.begin();
+ auto end = buffer.end();
+
+ EXPECT_NE(begin, end);
+ EXPECT_LE(begin, end);
+ EXPECT_GT(end, begin);
+ EXPECT_EQ(end, begin + 2);
+ EXPECT_EQ(begin, end - 2);
+
+ EXPECT_EQ(2, end - begin);
+ EXPECT_EQ(1, end - (begin + 1));
+
+ EXPECT_EQ(2, *begin);
+ ++begin;
+ EXPECT_EQ(3, *begin);
+ --begin;
+ EXPECT_EQ(2, *begin);
+ begin += 1;
+ EXPECT_EQ(3, *begin);
+ begin += -1;
+ EXPECT_EQ(2, *begin);
+ begin -= -1;
+ EXPECT_EQ(3, *begin);
+}
+
+TEST(RingBufferTest, Clear) {
+ RingBuffer<int> buffer(/*capacity=*/2);
+ EXPECT_THAT(buffer, ElementsAre());
+
+ buffer.pushBack(1);
+ EXPECT_THAT(buffer, ElementsAre(1));
+
+ buffer.clear();
+ EXPECT_THAT(buffer, ElementsAre());
+ EXPECT_THAT(buffer, SizeIs(0));
+ EXPECT_THAT(buffer, IsEmpty());
+
+ buffer.pushFront(1);
+ EXPECT_THAT(buffer, ElementsAre(1));
+}
+
+TEST(RingBufferTest, SizeAndIsEmpty) {
+ RingBuffer<int> buffer(/*capacity=*/2);
+ EXPECT_THAT(buffer, SizeIs(0));
+ EXPECT_THAT(buffer, IsEmpty());
+
+ buffer.pushBack(1);
+ EXPECT_THAT(buffer, SizeIs(1));
+ EXPECT_THAT(buffer, Not(IsEmpty()));
+
+ buffer.pushBack(2);
+ EXPECT_THAT(buffer, SizeIs(2));
+ EXPECT_THAT(buffer, Not(IsEmpty()));
+
+ buffer.pushBack(3);
+ EXPECT_THAT(buffer, SizeIs(2));
+ EXPECT_THAT(buffer, Not(IsEmpty()));
+
+ buffer.popFront();
+ EXPECT_THAT(buffer, SizeIs(1));
+ EXPECT_THAT(buffer, Not(IsEmpty()));
+
+ buffer.popBack();
+ EXPECT_THAT(buffer, SizeIs(0));
+ EXPECT_THAT(buffer, IsEmpty());
+}
+
+} // namespace
+} // namespace android
diff --git a/services/inputflinger/dispatcher/TouchState.cpp b/services/inputflinger/dispatcher/TouchState.cpp
index acfd0a2..4258471 100644
--- a/services/inputflinger/dispatcher/TouchState.cpp
+++ b/services/inputflinger/dispatcher/TouchState.cpp
@@ -33,8 +33,7 @@
void TouchState::removeTouchedPointer(int32_t pointerId) {
for (TouchedWindow& touchedWindow : windows) {
- touchedWindow.pointerIds.reset(pointerId);
- touchedWindow.pilferedPointerIds.reset(pointerId);
+ touchedWindow.removeTouchingPointer(pointerId);
}
}
@@ -42,8 +41,7 @@
int32_t pointerId, const sp<android::gui::WindowInfoHandle>& windowHandle) {
for (TouchedWindow& touchedWindow : windows) {
if (touchedWindow.windowHandle == windowHandle) {
- touchedWindow.pointerIds.reset(pointerId);
- touchedWindow.pilferedPointerIds.reset(pointerId);
+ touchedWindow.removeTouchingPointer(pointerId);
return;
}
}
@@ -164,17 +162,7 @@
std::for_each(windows.begin(), windows.end(), [&allPilferedPointerIds](TouchedWindow& w) {
std::bitset<MAX_POINTER_ID + 1> pilferedByOtherWindows =
w.pilferedPointerIds ^ allPilferedPointerIds;
- // TODO(b/211379801) : convert pointerIds to use std::bitset, which would allow us to
- // replace the loop below with a bitwise operation. Currently, the XOR operation above is
- // redundant, but is done to make the code more explicit / easier to convert later.
- for (std::size_t i = 0; i < pilferedByOtherWindows.size(); i++) {
- if (pilferedByOtherWindows.test(i) && !w.pilferedPointerIds.test(i)) {
- // Pointer is pilfered by other windows, but not by this one! Remove it from here.
- // We could call 'removeTouchedPointerFromWindow' here, but it's faster to directly
- // manipulate it.
- w.pointerIds.reset(i);
- }
- }
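+ // Pointers that were pilfered by other windows, but not by this one, are no longer
+ // delivered to this window.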
+ w.pointerIds &= ~pilferedByOtherWindows;
});
std::erase_if(windows, [](const TouchedWindow& w) { return w.pointerIds.none(); });
}
diff --git a/services/inputflinger/dispatcher/TouchedWindow.cpp b/services/inputflinger/dispatcher/TouchedWindow.cpp
index 92f62b5..99c4769 100644
--- a/services/inputflinger/dispatcher/TouchedWindow.cpp
+++ b/services/inputflinger/dispatcher/TouchedWindow.cpp
@@ -50,6 +50,14 @@
it->second.set(pointerId);
}
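+// Removes the pointer from both the touching and pilfered sets. Once no touching pointers
+// remain, the first-down time for this target is cleared.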
+void TouchedWindow::removeTouchingPointer(int32_t pointerId) {
+ pointerIds.reset(pointerId);
+ pilferedPointerIds.reset(pointerId);
+ if (pointerIds.none()) {
+ firstDownTimeInTarget.reset();
+ }
+}
+
void TouchedWindow::removeHoveringPointer(int32_t deviceId, int32_t pointerId) {
const auto it = mHoveringPointerIdsByDevice.find(deviceId);
if (it == mHoveringPointerIdsByDevice.end()) {
diff --git a/services/inputflinger/dispatcher/TouchedWindow.h b/services/inputflinger/dispatcher/TouchedWindow.h
index e59e781..aa2e9dd 100644
--- a/services/inputflinger/dispatcher/TouchedWindow.h
+++ b/services/inputflinger/dispatcher/TouchedWindow.h
@@ -43,6 +43,7 @@
bool hasHoveringPointer(int32_t deviceId, int32_t pointerId) const;
void addHoveringPointer(int32_t deviceId, int32_t pointerId);
void removeHoveringPointer(int32_t deviceId, int32_t pointerId);
+ void removeTouchingPointer(int32_t pointerId);
void clearHoveringPointers();
std::string dump() const;
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.cpp b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
index b789156..31fdac9 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
@@ -2156,6 +2156,53 @@
return out;
}
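+// Dispatches an ACTION_BUTTON_RELEASE gesture event for each button that was released since the
+// last cooked state, updating the reported button state as each release is dispatched.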
+std::list<NotifyArgs> TouchInputMapper::dispatchGestureButtonRelease(nsecs_t when,
+ uint32_t policyFlags,
+ BitSet32 idBits,
+ nsecs_t readTime) {
+ std::list<NotifyArgs> out;
+ BitSet32 releasedButtons(mLastCookedState.buttonState & ~mCurrentCookedState.buttonState);
+ const int32_t metaState = getContext()->getGlobalMetaState();
+ int32_t buttonState = mLastCookedState.buttonState;
+
+ while (!releasedButtons.isEmpty()) {
+ int32_t actionButton = BitSet32::valueForBit(releasedButtons.clearFirstMarkedBit());
+ buttonState &= ~actionButton;
+ out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
+ AMOTION_EVENT_ACTION_BUTTON_RELEASE, actionButton, 0,
+ metaState, buttonState, 0,
+ mPointerGesture.lastGestureProperties,
+ mPointerGesture.lastGestureCoords,
+ mPointerGesture.lastGestureIdToIndex, idBits, -1,
+ mOrientedXPrecision, mOrientedYPrecision,
+ mPointerGesture.downTime, MotionClassification::NONE));
+ }
+ return out;
+}
+
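+// Dispatches an ACTION_BUTTON_PRESS gesture event for each button that was newly pressed since
+// the last cooked state, updating the reported button state as each press is dispatched.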
+std::list<NotifyArgs> TouchInputMapper::dispatchGestureButtonPress(nsecs_t when,
+ uint32_t policyFlags,
+ BitSet32 idBits,
+ nsecs_t readTime) {
+ std::list<NotifyArgs> out;
+ BitSet32 pressedButtons(mCurrentCookedState.buttonState & ~mLastCookedState.buttonState);
+ const int32_t metaState = getContext()->getGlobalMetaState();
+ int32_t buttonState = mLastCookedState.buttonState;
+
+ while (!pressedButtons.isEmpty()) {
+ int32_t actionButton = BitSet32::valueForBit(pressedButtons.clearFirstMarkedBit());
+ buttonState |= actionButton;
+ out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
+ AMOTION_EVENT_ACTION_BUTTON_PRESS, actionButton, 0, metaState,
+ buttonState, 0, mPointerGesture.currentGestureProperties,
+ mPointerGesture.currentGestureCoords,
+ mPointerGesture.currentGestureIdToIndex, idBits, -1,
+ mOrientedXPrecision, mOrientedYPrecision,
+ mPointerGesture.downTime, MotionClassification::NONE));
+ }
+ return out;
+}
+
const BitSet32& TouchInputMapper::findActiveIdBits(const CookedPointerData& cookedPointerData) {
if (!cookedPointerData.touchingIdBits.isEmpty()) {
return cookedPointerData.touchingIdBits;
@@ -2540,8 +2587,13 @@
dispatchedGestureIdBits.value & ~mPointerGesture.currentGestureIdBits.value;
}
while (!upGestureIdBits.isEmpty()) {
- uint32_t id = upGestureIdBits.clearFirstMarkedBit();
-
+ if (((mLastCookedState.buttonState & AMOTION_EVENT_BUTTON_PRIMARY) != 0 ||
+ (mLastCookedState.buttonState & AMOTION_EVENT_BUTTON_SECONDARY) != 0) &&
+ mPointerGesture.lastGestureMode == PointerGesture::Mode::BUTTON_CLICK_OR_DRAG) {
+ out += dispatchGestureButtonRelease(when, policyFlags, dispatchedGestureIdBits,
+ readTime);
+ }
+ const uint32_t id = upGestureIdBits.clearFirstMarkedBit();
out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
AMOTION_EVENT_ACTION_POINTER_UP, 0, flags, metaState,
buttonState, AMOTION_EVENT_EDGE_FLAG_NONE,
@@ -2586,6 +2638,12 @@
mPointerGesture.currentGestureIdToIndex,
dispatchedGestureIdBits, id, 0, 0,
mPointerGesture.downTime, classification));
+ if (((buttonState & AMOTION_EVENT_BUTTON_PRIMARY) != 0 ||
+ (buttonState & AMOTION_EVENT_BUTTON_SECONDARY) != 0) &&
+ mPointerGesture.currentGestureMode == PointerGesture::Mode::BUTTON_CLICK_OR_DRAG) {
+ out += dispatchGestureButtonPress(when, policyFlags, dispatchedGestureIdBits,
+ readTime);
+ }
}
}
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.h b/services/inputflinger/reader/mapper/TouchInputMapper.h
index 87deb39..7b464ef 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.h
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.h
@@ -735,6 +735,14 @@
uint32_t policyFlags);
[[nodiscard]] std::list<NotifyArgs> dispatchButtonPress(nsecs_t when, nsecs_t readTime,
uint32_t policyFlags);
+ [[nodiscard]] std::list<NotifyArgs> dispatchGestureButtonPress(nsecs_t when,
+ uint32_t policyFlags,
+ BitSet32 idBits,
+ nsecs_t readTime);
+ [[nodiscard]] std::list<NotifyArgs> dispatchGestureButtonRelease(nsecs_t when,
+ uint32_t policyFlags,
+ BitSet32 idBits,
+ nsecs_t readTime);
const BitSet32& findActiveIdBits(const CookedPointerData& cookedPointerData);
void cookPointerData();
[[nodiscard]] std::list<NotifyArgs> abortTouches(nsecs_t when, nsecs_t readTime,
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index f8d4256..3605be2 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -2365,6 +2365,102 @@
}
/**
+ * This test is similar to the test above, but the sequence of injected events is different.
+ *
+ * Two windows: a window on the left and a window on the right.
+ * Mouse is hovered over the left window.
+ * Next, we tap on the left window, where the cursor was last seen.
+ *
+ * After that, we inject one finger down onto the right window, and then a second finger down onto
+ * the left window.
+ * The touch is split, so this last gesture should cause 2 ACTION_DOWN events, one in the right
+ * window (first), and then another on the left window (second).
+ * This test reproduces a crash where there is a mismatch between the downTime and eventTime.
+ * In the buggy implementation, the second finger down on the left window would cause a crash.
+ */
+TEST_F(InputDispatcherTest, HoverTapAndSplitTouch) {
+ std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+ sp<FakeWindowHandle> leftWindow =
+ sp<FakeWindowHandle>::make(application, mDispatcher, "Left", ADISPLAY_ID_DEFAULT);
+ leftWindow->setFrame(Rect(0, 0, 200, 200));
+
+ sp<FakeWindowHandle> rightWindow =
+ sp<FakeWindowHandle>::make(application, mDispatcher, "Right", ADISPLAY_ID_DEFAULT);
+ rightWindow->setFrame(Rect(200, 0, 400, 200));
+
+ mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {leftWindow, rightWindow}}});
+
+ const int32_t mouseDeviceId = 6;
+ const int32_t touchDeviceId = 4;
+ // Hover over the left window. Keep the cursor there.
+ ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+ injectMotionEvent(mDispatcher,
+ MotionEventBuilder(AMOTION_EVENT_ACTION_HOVER_ENTER,
+ AINPUT_SOURCE_MOUSE)
+ .deviceId(mouseDeviceId)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_MOUSE)
+ .x(50)
+ .y(50))
+ .build()));
+ leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER));
+
+ // Tap on left window
+ ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+ injectMotionEvent(mDispatcher,
+ MotionEventBuilder(AMOTION_EVENT_ACTION_DOWN,
+ AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+ .x(100)
+ .y(100))
+ .build()));
+
+ ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+ injectMotionEvent(mDispatcher,
+ MotionEventBuilder(AMOTION_EVENT_ACTION_UP,
+ AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+ .x(100)
+ .y(100))
+ .build()));
+ leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_EXIT));
+ leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+ leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_UP));
+
+ // First finger down on right window
+ ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+ injectMotionEvent(mDispatcher,
+ MotionEventBuilder(AMOTION_EVENT_ACTION_DOWN,
+ AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+ .x(300)
+ .y(100))
+ .build()));
+ rightWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+
+ // Second finger down on the left window
+ ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+ injectMotionEvent(mDispatcher,
+ MotionEventBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+ .x(300)
+ .y(100))
+ .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER)
+ .x(100)
+ .y(100))
+ .build()));
+ leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+ rightWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_MOVE));
+
+ // No more events
+ leftWindow->assertNoEvents();
+ rightWindow->assertNoEvents();
+}
+
+/**
* On the display, have a single window, and also an area where there's no window.
* First pointer touches the "no window" area of the screen. Second pointer touches the window.
* Make sure that the window receives the second pointer, and first pointer is simply ignored.
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index 1b04375..fe7af80 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -10084,6 +10084,26 @@
ASSERT_EQ(AMOTION_EVENT_ACTION_HOVER_MOVE, args.action);
ASSERT_NO_FATAL_FAILURE(assertPointerCoords(args.pointerCoords[0], 100 * scale, 100 * scale, 0,
0, 0, 0, 0, 0, 0, 0));
+
+ // BUTTON DOWN
+ processKey(mapper, BTN_LEFT, 1);
+ processSync(mapper);
+
+ // By design, TouchInputMapper sends a MOVE before the button press
+ ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+ ASSERT_EQ(AMOTION_EVENT_ACTION_DOWN, args.action);
+ ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+ ASSERT_EQ(AMOTION_EVENT_ACTION_BUTTON_PRESS, args.action);
+
+ // BUTTON UP
+ processKey(mapper, BTN_LEFT, 0);
+ processSync(mapper);
+
+ // By design, TouchInputMapper sends a MOVE after the button release
+ ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+ ASSERT_EQ(AMOTION_EVENT_ACTION_BUTTON_RELEASE, args.action);
+ ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+ ASSERT_EQ(AMOTION_EVENT_ACTION_UP, args.action);
}
TEST_F(MultiTouchInputMapperTest, WhenCapturedAndNotCaptured_GetSources) {
diff --git a/services/stats/StatsAidl.cpp b/services/stats/StatsAidl.cpp
index 1348548..0f01507 100644
--- a/services/stats/StatsAidl.cpp
+++ b/services/stats/StatsAidl.cpp
@@ -17,19 +17,72 @@
#define DEBUG false // STOPSHIP if true
#define LOG_TAG "StatsAidl"
+#define VLOG(...) \
+ if (DEBUG) ALOGD(__VA_ARGS__);
+
#include "StatsAidl.h"
#include <log/log.h>
+#include <stats_annotations.h>
+#include <stats_event.h>
#include <statslog.h>
+#include <unordered_map>
+
namespace aidl {
namespace android {
namespace frameworks {
namespace stats {
+template <typename E>
+constexpr typename std::underlying_type<E>::type to_underlying(E e) noexcept {
+ return static_cast<typename std::underlying_type<E>::type>(e);
+}
+
StatsHal::StatsHal() {
}
+bool write_annotation(AStatsEvent* event, const Annotation& annotation) {
+ switch (annotation.value.getTag()) {
+ case AnnotationValue::boolValue: {
+ AStatsEvent_addBoolAnnotation(event, to_underlying(annotation.annotationId),
+ annotation.value.get<AnnotationValue::boolValue>());
+ break;
+ }
+ case AnnotationValue::intValue: {
+ AStatsEvent_addInt32Annotation(event, to_underlying(annotation.annotationId),
+ annotation.value.get<AnnotationValue::intValue>());
+ break;
+ }
+ default: {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool write_atom_annotations(AStatsEvent* event,
+ const std::vector<std::optional<Annotation>>& annotations) {
+ for (const auto& atomAnnotation : annotations) {
+ if (!atomAnnotation) {
+ return false;
+ }
+ if (!write_annotation(event, *atomAnnotation)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool write_field_annotations(AStatsEvent* event, const std::vector<Annotation>& annotations) {
+ for (const auto& fieldAnnotation : annotations) {
+ if (!write_annotation(event, fieldAnnotation)) {
+ return false;
+ }
+ }
+ return true;
+}
+
ndk::ScopedAStatus StatsHal::reportVendorAtom(const VendorAtom& vendorAtom) {
if (vendorAtom.atomId < 100000 || vendorAtom.atomId >= 200000) {
ALOGE("Atom ID %ld is not a valid vendor atom ID", (long)vendorAtom.atomId);
@@ -44,7 +97,30 @@
}
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, vendorAtom.atomId);
+
+ if (vendorAtom.atomAnnotations) {
+ if (!write_atom_annotations(event, *vendorAtom.atomAnnotations)) {
+ ALOGE("Atom ID %ld has incompatible atom level annotation", (long)vendorAtom.atomId);
+ AStatsEvent_release(event);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ -1, "invalid atom annotation");
+ }
+ }
+
+ // Populate a map for quicker access to VendorAtomValue-associated annotations by value index
+ std::unordered_map<int, int> fieldIndexToAnnotationSetMap;
+ if (vendorAtom.valuesAnnotations) {
+ const std::vector<std::optional<AnnotationSet>>& valuesAnnotations =
+ *vendorAtom.valuesAnnotations;
+ for (int i = 0; i < valuesAnnotations.size(); i++) {
+ if (valuesAnnotations[i]) {
+ fieldIndexToAnnotationSetMap[valuesAnnotations[i]->valueIndex] = i;
+ }
+ }
+ }
+
AStatsEvent_writeString(event, vendorAtom.reverseDomainName.c_str());
+ size_t atomValueIdx = 0;
for (const auto& atomValue : vendorAtom.values) {
switch (atomValue.getTag()) {
case VendorAtomValue::intValue:
@@ -143,12 +219,37 @@
AStatsEvent_writeByteArray(event, byteArrayValue->data(), byteArrayValue->size());
break;
}
+ default: {
+ AStatsEvent_release(event);
+ ALOGE("Atom ID %ld has invalid atomValue.getTag", (long)vendorAtom.atomId);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ -1, "invalid atomValue.getTag");
+ break;
+ }
}
+
+ const auto& valueAnnotationIndex = fieldIndexToAnnotationSetMap.find(atomValueIdx);
+ if (valueAnnotationIndex != fieldIndexToAnnotationSetMap.end()) {
+ const std::vector<Annotation>& fieldAnnotations =
+ (*vendorAtom.valuesAnnotations)[valueAnnotationIndex->second]->annotations;
+ VLOG("Atom ID %ld has %ld annotations for field #%ld", (long)vendorAtom.atomId,
+ (long)fieldAnnotations.size(), (long)atomValueIdx + 2);
+ if (!write_field_annotations(event, fieldAnnotations)) {
+ ALOGE("Atom ID %ld has incompatible field level annotation for field #%ld",
+ (long)vendorAtom.atomId, (long)atomValueIdx + 2);
+ AStatsEvent_release(event);
+ return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ -1, "invalid atom field annotation");
+ }
+ }
+ atomValueIdx++;
}
AStatsEvent_build(event);
const int ret = AStatsEvent_write(event);
AStatsEvent_release(event);
-
+ if (ret <= 0) {
+ ALOGE("Error writing Atom ID %ld. Result: %d", (long)vendorAtom.atomId, ret);
+ }
return ret <= 0 ? ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(ret,
"report atom failed")
: ndk::ScopedAStatus::ok();
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index 6b5d1d7..b86d9be 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -24,6 +24,7 @@
#include <android-base/thread_annotations.h>
#include <android/native_window.h>
#include <binder/IBinder.h>
+#include <ftl/concat.h>
#include <gui/LayerState.h>
#include <math/mat4.h>
#include <renderengine/RenderEngine.h>
@@ -300,8 +301,8 @@
mutable std::mutex mActiveModeLock;
ActiveModeInfo mDesiredActiveMode GUARDED_BY(mActiveModeLock);
- TracedOrdinal<bool> mDesiredActiveModeChanged
- GUARDED_BY(mActiveModeLock) = {"DesiredActiveModeChanged", false};
+ TracedOrdinal<bool> mDesiredActiveModeChanged GUARDED_BY(mActiveModeLock) =
+ {ftl::Concat("DesiredActiveModeChanged-", getId().value).c_str(), false};
ActiveModeInfo mUpcomingActiveMode GUARDED_BY(kMainThreadContext);
};
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index 8e74716..6d94079 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -175,8 +175,8 @@
displayData.lastPresentTimestamp = timestamp;
}
- const ftl::Concat tag("HW_VSYNC_", displayIdOpt->value);
- ATRACE_INT(tag.c_str(), displayData.vsyncTraceToggle);
+ ATRACE_INT(ftl::Concat("HW_VSYNC_", displayIdOpt->value).c_str(),
+ displayData.vsyncTraceToggle);
displayData.vsyncTraceToggle = !displayData.vsyncTraceToggle;
return displayIdOpt;
@@ -377,8 +377,8 @@
displayData.vsyncEnabled = enabled;
- const auto tag = "HW_VSYNC_ON_" + to_string(displayId);
- ATRACE_INT(tag.c_str(), enabled == hal::Vsync::ENABLE ? 1 : 0);
+ ATRACE_INT(ftl::Concat("HW_VSYNC_ON_", displayId.value).c_str(),
+ enabled == hal::Vsync::ENABLE ? 1 : 0);
}
status_t HWComposer::setClientTarget(HalDisplayId displayId, uint32_t slot,
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 31ee91e..66c2fb6 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -734,6 +734,40 @@
return (p != nullptr) ? p->isSecure() : false;
}
+void Layer::transferAvailableJankData(const std::deque<sp<CallbackHandle>>& handles,
+ std::vector<JankData>& jankData) {
+ if (mPendingJankClassifications.empty() ||
+ !mPendingJankClassifications.front()->getJankType()) {
+ return;
+ }
+
+ bool includeJankData = false;
+ for (const auto& handle : handles) {
+ for (const auto& cb : handle->callbackIds) {
+ if (cb.includeJankData) {
+ includeJankData = true;
+ break;
+ }
+ }
+
+ if (includeJankData) {
+ jankData.reserve(mPendingJankClassifications.size());
+ break;
+ }
+ }
+
+ while (!mPendingJankClassifications.empty() &&
+ mPendingJankClassifications.front()->getJankType()) {
+ if (includeJankData) {
+ std::shared_ptr<frametimeline::SurfaceFrame> surfaceFrame =
+ mPendingJankClassifications.front();
+ jankData.emplace_back(
+ JankData(surfaceFrame->getToken(), surfaceFrame->getJankType().value()));
+ }
+ mPendingJankClassifications.pop_front();
+ }
+}
+
// ----------------------------------------------------------------------------
// transaction
// ----------------------------------------------------------------------------
@@ -2798,16 +2832,7 @@
}
std::vector<JankData> jankData;
- jankData.reserve(mPendingJankClassifications.size());
- while (!mPendingJankClassifications.empty() &&
- mPendingJankClassifications.front()->getJankType()) {
- std::shared_ptr<frametimeline::SurfaceFrame> surfaceFrame =
- mPendingJankClassifications.front();
- mPendingJankClassifications.pop_front();
- jankData.emplace_back(
- JankData(surfaceFrame->getToken(), surfaceFrame->getJankType().value()));
- }
-
+ transferAvailableJankData(mDrawingState.callbackHandles, jankData);
mFlinger->getTransactionCallbackInvoker().addCallbackHandles(mDrawingState.callbackHandles,
jankData);
mDrawingState.callbackHandles = {};
@@ -3106,6 +3131,7 @@
return false;
}
+ std::deque<sp<CallbackHandle>> remainingHandles;
for (const auto& handle : handles) {
// If this transaction set a buffer on this layer, release its previous buffer
handle->releasePreviousBuffer = mReleasePreviousBuffer;
@@ -3120,11 +3146,19 @@
mDrawingState.callbackHandles.push_back(handle);
} else { // If this layer will NOT need to be relatched and presented this frame
- // Notify the transaction completed thread this handle is done
- mFlinger->getTransactionCallbackInvoker().registerUnpresentedCallbackHandle(handle);
+ // Queue this handle to be notified below.
+ remainingHandles.push_back(handle);
}
}
+ if (!remainingHandles.empty()) {
+ // Notify the transaction completed thread that these handles are done. These are only the
+ // handles that were not added to mDrawingState; those will be notified later.
+ std::vector<JankData> jankData;
+ transferAvailableJankData(remainingHandles, jankData);
+ mFlinger->getTransactionCallbackInvoker().addCallbackHandles(remainingHandles, jankData);
+ }
+
mReleasePreviousBuffer = false;
mCallbackHandleAcquireTimeOrFence = -1;
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 3d4f03f..07c01d8 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -1064,6 +1064,11 @@
void updateChildrenSnapshots(bool updateGeometry);
+ // Fills the provided vector with the currently available JankData and removes the processed
+ // JankData from the pending list.
+ void transferAvailableJankData(const std::deque<sp<CallbackHandle>>& handles,
+ std::vector<JankData>& jankData);
+
// Cached properties computed from drawing state
// Effective transform taking into account parent transforms and any parent scaling, which is
// a transform from the current layer coordinate space to display(screen) coordinate space.
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 5e79a5c..eb6d7e4 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -238,19 +238,29 @@
namespace impl {
-EventThread::EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule> vsyncSchedule,
- IEventThreadCallback& eventThreadCallback,
+EventThread::EventThread(const char* name, scheduler::VsyncSchedule& vsyncSchedule,
android::frametimeline::TokenManager* tokenManager,
+ ThrottleVsyncCallback throttleVsyncCallback,
+ GetVsyncPeriodFunction getVsyncPeriodFunction,
std::chrono::nanoseconds workDuration,
std::chrono::nanoseconds readyDuration)
: mThreadName(name),
mVsyncTracer(base::StringPrintf("VSYNC-%s", name), 0),
mWorkDuration(base::StringPrintf("VsyncWorkDuration-%s", name), workDuration),
mReadyDuration(readyDuration),
- mVsyncSchedule(std::move(vsyncSchedule)),
- mVsyncRegistration(mVsyncSchedule->getDispatch(), createDispatchCallback(), name),
+ mVsyncSchedule(vsyncSchedule),
+ mVsyncRegistration(
+ vsyncSchedule.getDispatch(),
+ [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
+ onVsync(vsyncTime, wakeupTime, readyTime);
+ },
+ name),
mTokenManager(tokenManager),
- mEventThreadCallback(eventThreadCallback) {
+ mThrottleVsyncCallback(std::move(throttleVsyncCallback)),
+ mGetVsyncPeriodFunction(std::move(getVsyncPeriodFunction)) {
+ LOG_ALWAYS_FATAL_IF(getVsyncPeriodFunction == nullptr,
+ "getVsyncPeriodFunction must not be null");
+
mThread = std::thread([this]() NO_THREAD_SAFETY_ANALYSIS {
std::unique_lock<std::mutex> lock(mMutex);
threadMain(lock);
@@ -361,16 +371,16 @@
}
VsyncEventData vsyncEventData;
- const Fps frameInterval = mEventThreadCallback.getLeaderRenderFrameRate(connection->mOwnerUid);
- vsyncEventData.frameInterval = frameInterval.getPeriodNsecs();
+ nsecs_t frameInterval = mGetVsyncPeriodFunction(connection->mOwnerUid);
+ vsyncEventData.frameInterval = frameInterval;
const auto [presentTime, deadline] = [&]() -> std::pair<nsecs_t, nsecs_t> {
std::lock_guard<std::mutex> lock(mMutex);
- const auto vsyncTime = mVsyncSchedule->getTracker().nextAnticipatedVSyncTimeFrom(
+ const auto vsyncTime = mVsyncSchedule.getTracker().nextAnticipatedVSyncTimeFrom(
systemTime() + mWorkDuration.get().count() + mReadyDuration.count());
return {vsyncTime, vsyncTime - mReadyDuration.count()};
}();
- generateFrameTimeline(vsyncEventData, frameInterval.getPeriodNsecs(),
- systemTime(SYSTEM_TIME_MONOTONIC), presentTime, deadline);
+ generateFrameTimeline(vsyncEventData, frameInterval, systemTime(SYSTEM_TIME_MONOTONIC),
+ presentTime, deadline);
return vsyncEventData;
}
@@ -533,15 +543,14 @@
const auto throttleVsync = [&] {
const auto& vsyncData = event.vsync.vsyncData;
if (connection->frameRate.isValid()) {
- return !mVsyncSchedule->getTracker()
+ return !mVsyncSchedule.getTracker()
.isVSyncInPhase(vsyncData.preferredExpectedPresentationTime(),
connection->frameRate);
}
- const auto expectedPresentTime =
- TimePoint::fromNs(vsyncData.preferredExpectedPresentationTime());
- return !mEventThreadCallback.isVsyncTargetForUid(expectedPresentTime,
- connection->mOwnerUid);
+ return mThrottleVsyncCallback &&
+ mThrottleVsyncCallback(event.vsync.vsyncData.preferredExpectedPresentationTime(),
+ connection->mOwnerUid);
};
switch (event.header.type) {
@@ -629,11 +638,9 @@
for (const auto& consumer : consumers) {
DisplayEventReceiver::Event copy = event;
if (event.header.type == DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
- const Fps frameInterval =
- mEventThreadCallback.getLeaderRenderFrameRate(consumer->mOwnerUid);
- copy.vsync.vsyncData.frameInterval = frameInterval.getPeriodNsecs();
- generateFrameTimeline(copy.vsync.vsyncData, frameInterval.getPeriodNsecs(),
- copy.header.timestamp,
+ const int64_t frameInterval = mGetVsyncPeriodFunction(consumer->mOwnerUid);
+ copy.vsync.vsyncData.frameInterval = frameInterval;
+ generateFrameTimeline(copy.vsync.vsyncData, frameInterval, copy.header.timestamp,
event.vsync.vsyncData.preferredExpectedPresentationTime(),
event.vsync.vsyncData.preferredDeadlineTimestamp());
}
@@ -699,26 +706,6 @@
}
}
-void EventThread::onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule> schedule) {
- std::lock_guard<std::mutex> lock(mMutex);
- const bool reschedule = mVsyncRegistration.cancel() == scheduler::CancelResult::Cancelled;
- mVsyncSchedule = std::move(schedule);
- mVsyncRegistration =
- scheduler::VSyncCallbackRegistration(mVsyncSchedule->getDispatch(),
- createDispatchCallback(), mThreadName);
- if (reschedule) {
- mVsyncRegistration.schedule({.workDuration = mWorkDuration.get().count(),
- .readyDuration = mReadyDuration.count(),
- .earliestVsync = mLastVsyncCallbackTime.ns()});
- }
-}
-
-scheduler::VSyncDispatch::Callback EventThread::createDispatchCallback() {
- return [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
- onVsync(vsyncTime, wakeupTime, readyTime);
- };
-}
-
} // namespace impl
} // namespace android
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index aa27091..347dc4a 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -23,7 +23,6 @@
#include <sys/types.h>
#include <utils/Errors.h>
-#include <scheduler/Fps.h>
#include <scheduler/FrameRateMode.h>
#include <condition_variable>
#include <cstdint>
@@ -68,15 +67,6 @@
// Subsequent values are periods.
};
-class IEventThreadCallback {
-public:
- virtual ~IEventThreadCallback() = default;
-
- virtual bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const = 0;
-
- virtual Fps getLeaderRenderFrameRate(uid_t uid) const = 0;
-};
-
class EventThreadConnection : public gui::BnDisplayEventConnection {
public:
EventThreadConnection(EventThread*, uid_t callingUid, ResyncCallback,
@@ -146,17 +136,18 @@
// Retrieves the number of event connections tracked by this EventThread.
virtual size_t getEventThreadConnectionCount() = 0;
-
- virtual void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) = 0;
};
namespace impl {
class EventThread : public android::EventThread {
public:
- EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule>, IEventThreadCallback&,
- frametimeline::TokenManager*, std::chrono::nanoseconds workDuration,
- std::chrono::nanoseconds readyDuration);
+ using ThrottleVsyncCallback = std::function<bool(nsecs_t, uid_t)>;
+ using GetVsyncPeriodFunction = std::function<nsecs_t(uid_t)>;
+
+ EventThread(const char* name, scheduler::VsyncSchedule&, frametimeline::TokenManager*,
+ ThrottleVsyncCallback, GetVsyncPeriodFunction,
+ std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration);
~EventThread();
sp<EventThreadConnection> createEventConnection(
@@ -188,8 +179,6 @@
size_t getEventThreadConnectionCount() override;
- void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) override;
-
private:
friend EventThreadTest;
@@ -213,19 +202,17 @@
nsecs_t timestamp, nsecs_t preferredExpectedPresentationTime,
nsecs_t preferredDeadlineTimestamp) const;
- scheduler::VSyncDispatch::Callback createDispatchCallback();
-
const char* const mThreadName;
TracedOrdinal<int> mVsyncTracer;
TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mMutex);
std::chrono::nanoseconds mReadyDuration GUARDED_BY(mMutex);
- std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+ scheduler::VsyncSchedule& mVsyncSchedule;
TimePoint mLastVsyncCallbackTime GUARDED_BY(mMutex) = TimePoint::now();
scheduler::VSyncCallbackRegistration mVsyncRegistration GUARDED_BY(mMutex);
frametimeline::TokenManager* const mTokenManager;
- // mEventThreadCallback will outlive the EventThread.
- IEventThreadCallback& mEventThreadCallback;
+ const ThrottleVsyncCallback mThrottleVsyncCallback;
+ const GetVsyncPeriodFunction mGetVsyncPeriodFunction;
std::thread mThread;
mutable std::mutex mMutex;
diff --git a/services/surfaceflinger/Scheduler/ISchedulerCallback.h b/services/surfaceflinger/Scheduler/ISchedulerCallback.h
deleted file mode 100644
index 92c2189..0000000
--- a/services/surfaceflinger/Scheduler/ISchedulerCallback.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2023 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <vector>
-
-#include <ui/DisplayId.h>
-
-#include "Display/DisplayModeRequest.h"
-
-namespace android::scheduler {
-
-struct ISchedulerCallback {
- virtual void setVsyncEnabled(PhysicalDisplayId, bool) = 0;
- virtual void requestDisplayModes(std::vector<display::DisplayModeRequest>) = 0;
- virtual void kernelTimerChanged(bool expired) = 0;
- virtual void triggerOnFrameRateOverridesChanged() = 0;
-
-protected:
- ~ISchedulerCallback() = default;
-};
-
-} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.cpp b/services/surfaceflinger/Scheduler/MessageQueue.cpp
index 925f739..dec8f59 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.cpp
+++ b/services/surfaceflinger/Scheduler/MessageQueue.cpp
@@ -75,37 +75,19 @@
mHandler->dispatchFrame(vsyncId, expectedVsyncTime);
}
-void MessageQueue::initVsync(std::shared_ptr<scheduler::VSyncDispatch> dispatch,
+void MessageQueue::initVsync(scheduler::VSyncDispatch& dispatch,
frametimeline::TokenManager& tokenManager,
std::chrono::nanoseconds workDuration) {
std::lock_guard lock(mVsync.mutex);
mVsync.workDuration = workDuration;
mVsync.tokenManager = &tokenManager;
- updateVsyncRegistrationLocked(std::move(dispatch));
-}
-
-void MessageQueue::updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
- std::lock_guard lock(mVsync.mutex);
- updateVsyncRegistrationLocked(std::move(dispatch));
-}
-
-void MessageQueue::updateVsyncRegistrationLocked(
- std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
- const bool reschedule = mVsync.registration &&
- mVsync.registration->cancel() == scheduler::CancelResult::Cancelled;
mVsync.registration = std::make_unique<
- scheduler::VSyncCallbackRegistration>(std::move(dispatch),
+ scheduler::VSyncCallbackRegistration>(dispatch,
std::bind(&MessageQueue::vsyncCallback, this,
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3),
"sf");
- if (reschedule) {
- mVsync.scheduledFrameTime =
- mVsync.registration->schedule({.workDuration = mVsync.workDuration.get().count(),
- .readyDuration = 0,
- .earliestVsync = mVsync.lastCallbackTime.ns()});
- }
}
void MessageQueue::destroyVsync() {
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.h b/services/surfaceflinger/Scheduler/MessageQueue.h
index ecb237d..0d59337 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.h
+++ b/services/surfaceflinger/Scheduler/MessageQueue.h
@@ -65,7 +65,7 @@
public:
virtual ~MessageQueue() = default;
- virtual void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
+ virtual void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
std::chrono::nanoseconds workDuration) = 0;
virtual void destroyVsync() = 0;
virtual void setDuration(std::chrono::nanoseconds workDuration) = 0;
@@ -106,8 +106,6 @@
void vsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime);
- void updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch>) EXCLUDES(mVsync.mutex);
-
private:
virtual void onFrameSignal(ICompositor&, VsyncId, TimePoint expectedVsyncTime) = 0;
@@ -129,13 +127,10 @@
Vsync mVsync;
- void updateVsyncRegistrationLocked(std::shared_ptr<scheduler::VSyncDispatch>)
- REQUIRES(mVsync.mutex);
-
public:
explicit MessageQueue(ICompositor&);
- void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
+ void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
std::chrono::nanoseconds workDuration) override;
void destroyVsync() override;
void setDuration(std::chrono::nanoseconds workDuration) override;
diff --git a/services/surfaceflinger/Scheduler/OneShotTimer.h b/services/surfaceflinger/Scheduler/OneShotTimer.h
index 02e8719..f95646c 100644
--- a/services/surfaceflinger/Scheduler/OneShotTimer.h
+++ b/services/surfaceflinger/Scheduler/OneShotTimer.h
@@ -40,7 +40,7 @@
OneShotTimer(std::string name, const Interval& interval, const ResetCallback& resetCallback,
const TimeoutCallback& timeoutCallback,
- std::unique_ptr<android::Clock> clock = std::make_unique<SteadyClock>());
+ std::unique_ptr<Clock> clock = std::make_unique<SteadyClock>());
~OneShotTimer();
Duration interval() const { return mInterval; }
@@ -82,7 +82,7 @@
std::thread mThread;
// Clock object for the timer. Mocked in unit tests.
- std::unique_ptr<android::Clock> mClock;
+ std::unique_ptr<Clock> mClock;
// Semaphore to keep mThread synchronized.
sem_t mSemaphore;
diff --git a/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp b/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
index c5b3e14..f6fe468 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
+++ b/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
@@ -238,7 +238,6 @@
std::string name = to_string(frameRateMode);
ALOGV("%s sorting scores %.2f", name.c_str(), overallScore);
- ATRACE_INT(name.c_str(), static_cast<int>(std::round(overallScore * 100)));
if (!ScoredFrameRate::scoresEqual(overallScore, rhs.overallScore)) {
return overallScore > rhs.overallScore;
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index eed57ef..5fc250b 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -114,18 +114,10 @@
}
void Scheduler::registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
- registerDisplayInternal(displayId, std::move(selectorPtr),
- std::make_shared<VsyncSchedule>(displayId, mFeatures));
-}
-
-void Scheduler::registerDisplayInternal(PhysicalDisplayId displayId,
- RefreshRateSelectorPtr selectorPtr,
- std::shared_ptr<VsyncSchedule> vsyncSchedule) {
demoteLeaderDisplay();
std::scoped_lock lock(mDisplayLock);
mRefreshRateSelectors.emplace_or_replace(displayId, std::move(selectorPtr));
- mVsyncSchedules.emplace_or_replace(displayId, std::move(vsyncSchedule));
promoteLeaderDisplay();
}
@@ -135,7 +127,6 @@
std::scoped_lock lock(mDisplayLock);
mRefreshRateSelectors.erase(displayId);
- mVsyncSchedules.erase(displayId);
// Do not allow removing the final display. Code in the scheduler expects
// there to be at least one display. (This may be relaxed in the future with
@@ -163,49 +154,52 @@
compositor.sample();
}
-std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
- std::scoped_lock lock(mDisplayLock);
- return getFrameRateOverrideLocked(uid);
+void Scheduler::createVsyncSchedule(FeatureFlags features) {
+ mVsyncSchedule.emplace(features);
}
-std::optional<Fps> Scheduler::getFrameRateOverrideLocked(uid_t uid) const {
+std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
const bool supportsFrameRateOverrideByContent =
- leaderSelectorPtrLocked()->supportsAppFrameRateOverrideByContent();
+ leaderSelectorPtr()->supportsAppFrameRateOverrideByContent();
return mFrameRateOverrideMappings
.getFrameRateOverrideForUid(uid, supportsFrameRateOverrideByContent);
}
-bool Scheduler::isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const {
+bool Scheduler::isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const {
const auto frameRate = getFrameRateOverride(uid);
if (!frameRate.has_value()) {
return true;
}
- return isVsyncInPhase(expectedVsyncTime, *frameRate);
+ return mVsyncSchedule->getTracker().isVSyncInPhase(expectedVsyncTimestamp.ns(), *frameRate);
}
-bool Scheduler::isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const {
- return getVsyncSchedule()->getTracker().isVSyncInPhase(expectedVsyncTime.ns(), frameRate);
+bool Scheduler::isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const {
+ return mVsyncSchedule->getTracker().isVSyncInPhase(timePoint.ns(), frameRate);
}
-Fps Scheduler::getLeaderRenderFrameRate(uid_t uid) const {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
- auto vsyncSchedule = getVsyncScheduleLocked();
+impl::EventThread::ThrottleVsyncCallback Scheduler::makeThrottleVsyncCallback() const {
+ return [this](nsecs_t expectedVsyncTimestamp, uid_t uid) {
+ return !isVsyncValid(TimePoint::fromNs(expectedVsyncTimestamp), uid);
+ };
+}
- const Fps refreshRate = leaderSelectorPtrLocked()->getActiveMode().fps;
- const nsecs_t currentPeriod = vsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
+impl::EventThread::GetVsyncPeriodFunction Scheduler::makeGetVsyncPeriodFunction() const {
+ return [this](uid_t uid) {
+ const Fps refreshRate = leaderSelectorPtr()->getActiveMode().fps;
+ const nsecs_t currentPeriod = mVsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
- const auto frameRate = getFrameRateOverrideLocked(uid);
- if (!frameRate.has_value()) {
- return Fps::fromPeriodNsecs(currentPeriod);
- }
+ const auto frameRate = getFrameRateOverride(uid);
+ if (!frameRate.has_value()) {
+ return currentPeriod;
+ }
- const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
- if (divisor <= 1) {
- return Fps::fromPeriodNsecs(currentPeriod);
- }
- return Fps::fromPeriodNsecs(currentPeriod * divisor);
+ const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
+ if (divisor <= 1) {
+ return currentPeriod;
+ }
+ return currentPeriod * divisor;
+ };
}
ConnectionHandle Scheduler::createEventThread(Cycle cycle,
@@ -213,7 +207,9 @@
std::chrono::nanoseconds workDuration,
std::chrono::nanoseconds readyDuration) {
auto eventThread = std::make_unique<impl::EventThread>(cycle == Cycle::Render ? "app" : "appSf",
- getVsyncSchedule(), *this, tokenManager,
+ *mVsyncSchedule, tokenManager,
+ makeThrottleVsyncCallback(),
+ makeGetVsyncPeriodFunction(),
workDuration, readyDuration);
auto& handle = cycle == Cycle::Render ? mAppConnectionHandle : mSfConnectionHandle;
@@ -396,57 +392,48 @@
setDuration(config.sfWorkDuration);
}
-void Scheduler::enableHardwareVsync(PhysicalDisplayId id) {
- auto schedule = getVsyncSchedule(id);
- schedule->enableHardwareVsync(mSchedulerCallback);
-}
-
-void Scheduler::disableHardwareVsync(PhysicalDisplayId id, bool disallow) {
- auto schedule = getVsyncSchedule(id);
- schedule->disableHardwareVsync(mSchedulerCallback, disallow);
-}
-
-void Scheduler::resyncAllToHardwareVsync(bool allowToEnable) {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
-
- for (const auto& [id, _] : mRefreshRateSelectors) {
- resyncToHardwareVsyncLocked(id, allowToEnable);
+void Scheduler::enableHardwareVsync() {
+ std::lock_guard<std::mutex> lock(mHWVsyncLock);
+ if (!mPrimaryHWVsyncEnabled && mHWVsyncAvailable) {
+ mVsyncSchedule->getTracker().resetModel();
+ mSchedulerCallback.setVsyncEnabled(true);
+ mPrimaryHWVsyncEnabled = true;
}
}
-void Scheduler::resyncToHardwareVsyncLocked(PhysicalDisplayId id, bool allowToEnable,
- std::optional<Fps> refreshRate) {
- if (!refreshRate) {
- auto selectorPtr = mRefreshRateSelectors.get(id);
- LOG_ALWAYS_FATAL_IF(!selectorPtr);
- refreshRate = selectorPtr->get()->getActiveMode().modePtr->getFps();
+void Scheduler::disableHardwareVsync(bool makeUnavailable) {
+ std::lock_guard<std::mutex> lock(mHWVsyncLock);
+ if (mPrimaryHWVsyncEnabled) {
+ mSchedulerCallback.setVsyncEnabled(false);
+ mPrimaryHWVsyncEnabled = false;
}
- auto schedule = getVsyncScheduleLocked(id);
- if (allowToEnable) {
- schedule->allowHardwareVsync();
- } else if (!schedule->isHardwareVsyncAllowed()) {
- // Hardware vsync is not currently allowed, so abort the resync
- // attempt for now.
- return;
+ if (makeUnavailable) {
+ mHWVsyncAvailable = false;
}
-
- setVsyncPeriod(schedule, refreshRate->getPeriodNsecs(), false /* force */);
}
-void Scheduler::setRenderRate(PhysicalDisplayId id, Fps renderFrameRate) {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
+void Scheduler::resyncToHardwareVsync(bool makeAvailable, Fps refreshRate) {
+ {
+ std::lock_guard<std::mutex> lock(mHWVsyncLock);
+ if (makeAvailable) {
+ mHWVsyncAvailable = makeAvailable;
+ } else if (!mHWVsyncAvailable) {
+ // Hardware vsync is not currently available, so abort the resync
+ // attempt for now
+ return;
+ }
+ }
- auto selectorPtr = mRefreshRateSelectors.get(id);
- LOG_ALWAYS_FATAL_IF(!selectorPtr);
- const auto mode = selectorPtr->get()->getActiveMode();
+ setVsyncPeriod(refreshRate.getPeriodNsecs());
+}
+
+void Scheduler::setRenderRate(Fps renderFrameRate) {
+ const auto mode = leaderSelectorPtr()->getActiveMode();
using fps_approx_ops::operator!=;
LOG_ALWAYS_FATAL_IF(renderFrameRate != mode.fps,
- "Mismatch in render frame rates. Selector: %s, Scheduler: %s, Display: "
- "%" PRIu64,
- to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str(), id.value);
+ "Mismatch in render frame rates. Selector: %s, Scheduler: %s",
+ to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str());
ALOGV("%s %s (%s)", __func__, to_string(mode.fps).c_str(),
to_string(mode.modePtr->getFps()).c_str());
@@ -455,7 +442,7 @@
LOG_ALWAYS_FATAL_IF(divisor == 0, "%s <> %s -- not divisors", to_string(mode.fps).c_str(),
to_string(mode.fps).c_str());
- getVsyncScheduleLocked(id)->getTracker().setDivisor(static_cast<unsigned>(divisor));
+ mVsyncSchedule->getTracker().setDivisor(static_cast<unsigned>(divisor));
}
void Scheduler::resync() {
@@ -465,43 +452,49 @@
const nsecs_t last = mLastResyncTime.exchange(now);
if (now - last > kIgnoreDelay) {
- resyncAllToHardwareVsync(false /* allowToEnable */);
+ const auto refreshRate = leaderSelectorPtr()->getActiveMode().modePtr->getFps();
+ resyncToHardwareVsync(false, refreshRate);
}
}
-void Scheduler::setVsyncPeriod(const std::shared_ptr<VsyncSchedule>& schedule, nsecs_t period,
- bool force) {
- ALOGD("Scheduler::setVsyncPeriod");
+void Scheduler::setVsyncPeriod(nsecs_t period) {
if (period <= 0) return;
- // TODO (b/266712910):The old code held mHWVsyncLock before calling
- // startPeriodTransition. Move these into a new method on VsyncSchedule that
- // encapsulates this behavior there and allows holding the lock the whole
- // time.
- schedule->getController().startPeriodTransition(period, force);
- schedule->enableHardwareVsync(mSchedulerCallback);
+ std::lock_guard<std::mutex> lock(mHWVsyncLock);
+ mVsyncSchedule->getController().startPeriodTransition(period);
+
+ if (!mPrimaryHWVsyncEnabled) {
+ mVsyncSchedule->getTracker().resetModel();
+ mSchedulerCallback.setVsyncEnabled(true);
+ mPrimaryHWVsyncEnabled = true;
+ }
}
-bool Scheduler::addResyncSample(PhysicalDisplayId id, nsecs_t timestamp,
- std::optional<nsecs_t> hwcVsyncPeriod) {
- bool periodFlushed = false;
- auto schedule = getVsyncSchedule(id);
- if (schedule->getController().addHwVsyncTimestamp(timestamp, hwcVsyncPeriod, &periodFlushed)) {
- schedule->enableHardwareVsync(mSchedulerCallback);
- } else {
- schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
+void Scheduler::addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
+ bool* periodFlushed) {
+ bool needsHwVsync = false;
+ *periodFlushed = false;
+ { // Scope for the lock
+ std::lock_guard<std::mutex> lock(mHWVsyncLock);
+ if (mPrimaryHWVsyncEnabled) {
+ needsHwVsync =
+ mVsyncSchedule->getController().addHwVsyncTimestamp(timestamp, hwcVsyncPeriod,
+ periodFlushed);
+ }
}
- return periodFlushed;
+ if (needsHwVsync) {
+ enableHardwareVsync();
+ } else {
+ disableHardwareVsync(false);
+ }
}
-void Scheduler::addPresentFence(PhysicalDisplayId id, std::shared_ptr<FenceTime> fence) {
- auto schedule = getVsyncSchedule(id);
- const bool needMoreSignals = schedule->getController().addPresentFence(std::move(fence));
- if (needMoreSignals) {
- schedule->enableHardwareVsync(mSchedulerCallback);
+void Scheduler::addPresentFence(std::shared_ptr<FenceTime> fence) {
+ if (mVsyncSchedule->getController().addPresentFence(std::move(fence))) {
+ enableHardwareVsync();
} else {
- schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
+ disableHardwareVsync(false);
}
}
@@ -553,22 +546,12 @@
}
}
-void Scheduler::setDisplayPowerMode(PhysicalDisplayId id, hal::PowerMode powerMode) {
- const bool isLeader = [this, id]() REQUIRES(kMainThreadContext) {
- ftl::FakeGuard guard(mDisplayLock);
- return id == mLeaderDisplayId;
- }();
- if (isLeader) {
- // TODO (b/255657128): This needs to be handled per display.
+void Scheduler::setDisplayPowerMode(hal::PowerMode powerMode) {
+ {
std::lock_guard<std::mutex> lock(mPolicyLock);
mPolicy.displayPowerMode = powerMode;
}
- {
- std::scoped_lock lock(mDisplayLock);
- auto vsyncSchedule = getVsyncScheduleLocked(id);
- vsyncSchedule->getController().setDisplayPowerMode(powerMode);
- }
- if (!isLeader) return;
+ mVsyncSchedule->getController().setDisplayPowerMode(powerMode);
if (mDisplayPowerTimer) {
mDisplayPowerTimer->reset();
@@ -579,24 +562,6 @@
mLayerHistory.clear();
}
-std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncSchedule(
- std::optional<PhysicalDisplayId> idOpt) const {
- std::scoped_lock lock(mDisplayLock);
- return getVsyncScheduleLocked(idOpt);
-}
-
-std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncScheduleLocked(
- std::optional<PhysicalDisplayId> idOpt) const {
- ftl::FakeGuard guard(kMainThreadContext);
- if (!idOpt) {
- LOG_ALWAYS_FATAL_IF(!mLeaderDisplayId, "Missing a leader!");
- idOpt = mLeaderDisplayId;
- }
- auto scheduleOpt = mVsyncSchedules.get(*idOpt);
- LOG_ALWAYS_FATAL_IF(!scheduleOpt);
- return std::const_pointer_cast<const VsyncSchedule>(scheduleOpt->get());
-}
-
void Scheduler::kernelIdleTimerCallback(TimerState state) {
ATRACE_INT("ExpiredKernelIdleTimer", static_cast<int>(state));
@@ -611,17 +576,12 @@
// If we're not in performance mode then the kernel timer shouldn't do
// anything, as the refresh rate during DPU power collapse will be the
// same.
- resyncAllToHardwareVsync(true /* allowToEnable */);
+ resyncToHardwareVsync(true /* makeAvailable */, refreshRate);
} else if (state == TimerState::Expired && refreshRate <= FPS_THRESHOLD_FOR_KERNEL_TIMER) {
// Disable HW VSYNC if the timer expired, as we don't need it enabled if
// we're not pushing frames, and if we're in PERFORMANCE mode then we'll
// need to update the VsyncController model anyway.
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
- constexpr bool disallow = false;
- for (auto& [_, schedule] : mVsyncSchedules) {
- schedule->disableHardwareVsync(mSchedulerCallback, disallow);
- }
+ disableHardwareVsync(false /* makeUnavailable */);
}
mSchedulerCallback.kernelTimerChanged(state == TimerState::Expired);
@@ -675,23 +635,18 @@
mFrameRateOverrideMappings.dump(dumper);
dumper.eol();
+
+ {
+ utils::Dumper::Section section(dumper, "Hardware VSYNC"sv);
+
+ std::lock_guard lock(mHWVsyncLock);
+ dumper.dump("hwVsyncAvailable"sv, mHWVsyncAvailable);
+ dumper.dump("hwVsyncEnabled"sv, mPrimaryHWVsyncEnabled);
+ }
}
void Scheduler::dumpVsync(std::string& out) const {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
- if (mLeaderDisplayId) {
- base::StringAppendF(&out, "VsyncSchedule for leader %s:\n",
- to_string(*mLeaderDisplayId).c_str());
- getVsyncScheduleLocked()->dump(out);
- }
- for (auto& [id, vsyncSchedule] : mVsyncSchedules) {
- if (id == mLeaderDisplayId) {
- continue;
- }
- base::StringAppendF(&out, "VsyncSchedule for follower %s:\n", to_string(id).c_str());
- vsyncSchedule->dump(out);
- }
+ mVsyncSchedule->dump(out);
}
bool Scheduler::updateFrameRateOverrides(GlobalSignals consideredSignals, Fps displayRefreshRate) {
@@ -711,7 +666,6 @@
mLeaderDisplayId = leaderIdOpt.value_or(mRefreshRateSelectors.begin()->first);
ALOGI("Display %s is the leader", to_string(*mLeaderDisplayId).c_str());
- auto vsyncSchedule = getVsyncScheduleLocked(*mLeaderDisplayId);
if (const auto leaderPtr = leaderSelectorPtrLocked()) {
leaderPtr->setIdleTimerCallbacks(
{.platform = {.onReset = [this] { idleTimerCallback(TimerState::Reset); },
@@ -721,17 +675,6 @@
[this] { kernelIdleTimerCallback(TimerState::Expired); }}});
leaderPtr->startIdleTimer();
-
- const Fps refreshRate = leaderPtr->getActiveMode().modePtr->getFps();
- setVsyncPeriod(vsyncSchedule, refreshRate.getPeriodNsecs(), true /* force */);
- }
-
- updateVsyncRegistration(vsyncSchedule->getDispatch());
- {
- std::lock_guard<std::mutex> lock(mConnectionsLock);
- for (auto& [_, connection] : mConnections) {
- connection.thread->onNewVsyncSchedule(vsyncSchedule);
- }
}
}
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 8c8fc21..dae9546 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -43,7 +43,6 @@
#include "Display/DisplayModeRequest.h"
#include "EventThread.h"
#include "FrameRateOverrideMappings.h"
-#include "ISchedulerCallback.h"
#include "LayerHistory.h"
#include "MessageQueue.h"
#include "OneShotTimer.h"
@@ -93,7 +92,17 @@
using GlobalSignals = RefreshRateSelector::GlobalSignals;
-class Scheduler : android::impl::MessageQueue, public IEventThreadCallback {
+struct ISchedulerCallback {
+ virtual void setVsyncEnabled(bool) = 0;
+ virtual void requestDisplayModes(std::vector<display::DisplayModeRequest>) = 0;
+ virtual void kernelTimerChanged(bool expired) = 0;
+ virtual void triggerOnFrameRateOverridesChanged() = 0;
+
+protected:
+ ~ISchedulerCallback() = default;
+};
+
+class Scheduler : android::impl::MessageQueue {
using Impl = android::impl::MessageQueue;
public:
@@ -114,6 +123,8 @@
void run();
+ void createVsyncSchedule(FeatureFlags);
+
using Impl::initVsync;
using Impl::getScheduledFrameTime;
@@ -167,21 +178,9 @@
const VsyncModulator& vsyncModulator() const { return *mVsyncModulator; }
- // In some cases, we should only modulate for the leader display. In those
- // cases, the caller should pass in the relevant display, and the method
- // will no-op if it's not the leader. Other cases are not specific to a
- // display.
template <typename... Args,
typename Handler = std::optional<VsyncConfig> (VsyncModulator::*)(Args...)>
- void modulateVsync(std::optional<PhysicalDisplayId> id, Handler handler, Args... args) {
- if (id) {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
- if (id != mLeaderDisplayId) {
- return;
- }
- }
-
+ void modulateVsync(Handler handler, Args... args) {
if (const auto config = (*mVsyncModulator.*handler)(args...)) {
setVsyncConfig(*config, getLeaderVsyncPeriod());
}
@@ -190,32 +189,24 @@
void setVsyncConfigSet(const VsyncConfigSet&, Period vsyncPeriod);
// Sets the render rate for the scheduler to run at.
- void setRenderRate(PhysicalDisplayId, Fps);
+ void setRenderRate(Fps);
- void enableHardwareVsync(PhysicalDisplayId);
- void disableHardwareVsync(PhysicalDisplayId, bool makeUnavailable);
+ void enableHardwareVsync();
+ void disableHardwareVsync(bool makeUnavailable);
// Resyncs the scheduler to hardware vsync.
- // If allowToEnable is true, then hardware vsync will be turned on.
+ // If makeAvailable is true, then hardware vsync will be turned on.
// Otherwise, if hardware vsync is not already enabled then this method will
// no-op.
- // If refreshRate is nullopt, use the existing refresh rate of the display.
- void resyncToHardwareVsync(PhysicalDisplayId id, bool allowToEnable,
- std::optional<Fps> refreshRate = std::nullopt)
- EXCLUDES(mDisplayLock) {
- std::scoped_lock lock(mDisplayLock);
- ftl::FakeGuard guard(kMainThreadContext);
- resyncToHardwareVsyncLocked(id, allowToEnable, refreshRate);
- }
+ void resyncToHardwareVsync(bool makeAvailable, Fps refreshRate);
void resync() EXCLUDES(mDisplayLock);
void forceNextResync() { mLastResyncTime = 0; }
- // Passes a vsync sample to VsyncController. Returns true if
- // VsyncController detected that the vsync period changed and false
- // otherwise.
- bool addResyncSample(PhysicalDisplayId, nsecs_t timestamp,
- std::optional<nsecs_t> hwcVsyncPeriod);
- void addPresentFence(PhysicalDisplayId, std::shared_ptr<FenceTime>) EXCLUDES(mDisplayLock);
+ // Passes a vsync sample to VsyncController. periodFlushed will be true if
+ // VsyncController detected that the vsync period changed, and false otherwise.
+ void addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
+ bool* periodFlushed);
+ void addPresentFence(std::shared_ptr<FenceTime>);
// Layers are registered on creation, and unregistered when the weak reference expires.
void registerLayer(Layer*);
@@ -233,22 +224,20 @@
// Indicates that touch interaction is taking place.
void onTouchHint();
- void setDisplayPowerMode(PhysicalDisplayId, hal::PowerMode powerMode)
- REQUIRES(kMainThreadContext);
+ void setDisplayPowerMode(hal::PowerMode powerMode);
- std::shared_ptr<const VsyncSchedule> getVsyncSchedule(
- std::optional<PhysicalDisplayId> idOpt = std::nullopt) const EXCLUDES(mDisplayLock);
- std::shared_ptr<VsyncSchedule> getVsyncSchedule(
- std::optional<PhysicalDisplayId> idOpt = std::nullopt) EXCLUDES(mDisplayLock) {
- return std::const_pointer_cast<VsyncSchedule>(
- static_cast<const Scheduler*>(this)->getVsyncSchedule(idOpt));
- }
+ VsyncSchedule& getVsyncSchedule() { return *mVsyncSchedule; }
- bool isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const;
+ // Returns true if a given vsync timestamp is considered a valid vsync
+ // for a given uid
+ bool isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const;
+
+ // Checks if a vsync timestamp is in phase for a frame rate
+ bool isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const;
void dump(utils::Dumper&) const;
void dump(ConnectionHandle, std::string&) const;
- void dumpVsync(std::string&) const EXCLUDES(mDisplayLock);
+ void dumpVsync(std::string&) const;
// Returns the preferred refresh rate and frame rate for the leader display.
FrameRateMode getPreferredDisplayMode();
@@ -286,10 +275,6 @@
return mLayerHistory.getLayerFramerate(now, id);
}
- // IEventThreadCallback overrides:
- bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override;
- Fps getLeaderRenderFrameRate(uid_t uid) const override;
-
private:
friend class TestableScheduler;
@@ -312,12 +297,7 @@
void touchTimerCallback(TimerState);
void displayPowerTimerCallback(TimerState);
- void resyncToHardwareVsyncLocked(PhysicalDisplayId, bool allowToEnable,
- std::optional<Fps> refreshRate = std::nullopt)
- REQUIRES(kMainThreadContext, mDisplayLock);
- void resyncAllToHardwareVsync(bool allowToEnable) EXCLUDES(mDisplayLock);
- void setVsyncPeriod(const std::shared_ptr<VsyncSchedule>&, nsecs_t period, bool force)
- REQUIRES(mDisplayLock);
+ void setVsyncPeriod(nsecs_t period);
void setVsyncConfig(const VsyncConfig&, Period vsyncPeriod);
// Chooses a leader among the registered displays, unless `leaderIdOpt` is specified. The new
@@ -329,12 +309,6 @@
// caller on the main thread to avoid deadlock, since the timer thread locks it before exit.
void demoteLeaderDisplay() REQUIRES(kMainThreadContext) EXCLUDES(mDisplayLock, mPolicyLock);
- void registerDisplayInternal(PhysicalDisplayId, RefreshRateSelectorPtr,
- std::shared_ptr<VsyncSchedule>) REQUIRES(kMainThreadContext)
- EXCLUDES(mDisplayLock);
-
- std::optional<Fps> getFrameRateOverrideLocked(uid_t) const REQUIRES(mDisplayLock);
-
struct Policy;
// Sets the S state of the policy to the T value under mPolicyLock, and chooses a display mode
@@ -372,6 +346,9 @@
void dispatchCachedReportedMode() REQUIRES(mPolicyLock) EXCLUDES(mDisplayLock);
+ android::impl::EventThread::ThrottleVsyncCallback makeThrottleVsyncCallback() const;
+ android::impl::EventThread::GetVsyncPeriodFunction makeGetVsyncPeriodFunction() const;
+
// Stores EventThread associated with a given VSyncSource, and an initial EventThreadConnection.
struct Connection {
sp<EventThreadConnection> connection;
@@ -385,9 +362,14 @@
ConnectionHandle mAppConnectionHandle;
ConnectionHandle mSfConnectionHandle;
+ mutable std::mutex mHWVsyncLock;
+ bool mPrimaryHWVsyncEnabled GUARDED_BY(mHWVsyncLock) = false;
+ bool mHWVsyncAvailable GUARDED_BY(mHWVsyncLock) = false;
+
std::atomic<nsecs_t> mLastResyncTime = 0;
const FeatureFlags mFeatures;
+ std::optional<VsyncSchedule> mVsyncSchedule;
// Shifts the VSYNC phase during certain transactions and refresh rate changes.
const sp<VsyncModulator> mVsyncModulator;
@@ -412,10 +394,6 @@
display::PhysicalDisplayMap<PhysicalDisplayId, RefreshRateSelectorPtr> mRefreshRateSelectors
GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
- // TODO (b/266715559): Store in the same map as mRefreshRateSelectors.
- display::PhysicalDisplayMap<PhysicalDisplayId, std::shared_ptr<VsyncSchedule>> mVsyncSchedules
- GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
-
ftl::Optional<PhysicalDisplayId> mLeaderDisplayId GUARDED_BY(mDisplayLock)
GUARDED_BY(kMainThreadContext);
@@ -435,14 +413,6 @@
.value_or(std::cref(noLeader));
}
- std::shared_ptr<const VsyncSchedule> getVsyncScheduleLocked(
- std::optional<PhysicalDisplayId> idOpt = std::nullopt) const REQUIRES(mDisplayLock);
- std::shared_ptr<VsyncSchedule> getVsyncScheduleLocked(
- std::optional<PhysicalDisplayId> idOpt = std::nullopt) REQUIRES(mDisplayLock) {
- return std::const_pointer_cast<VsyncSchedule>(
- static_cast<const Scheduler*>(this)->getVsyncScheduleLocked(idOpt));
- }
-
struct Policy {
// Policy for choosing the display mode.
LayerHistory::Summary contentRequirements;
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatch.h b/services/surfaceflinger/Scheduler/VSyncDispatch.h
index 77875e3..9520131 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatch.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatch.h
@@ -161,8 +161,7 @@
*/
class VSyncCallbackRegistration {
public:
- VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch>, VSyncDispatch::Callback,
- std::string callbackName);
+ VSyncCallbackRegistration(VSyncDispatch&, VSyncDispatch::Callback, std::string callbackName);
~VSyncCallbackRegistration();
VSyncCallbackRegistration(VSyncCallbackRegistration&&);
@@ -178,7 +177,7 @@
CancelResult cancel();
private:
- std::shared_ptr<VSyncDispatch> mDispatch;
+ std::reference_wrapper<VSyncDispatch> mDispatch;
VSyncDispatch::CallbackToken mToken;
bool mValidToken;
};
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index 26389eb..73d52cf 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -215,10 +215,10 @@
}
VSyncDispatchTimerQueue::VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper> tk,
- VsyncSchedule::TrackerPtr tracker,
- nsecs_t timerSlack, nsecs_t minVsyncDistance)
+ VSyncTracker& tracker, nsecs_t timerSlack,
+ nsecs_t minVsyncDistance)
: mTimeKeeper(std::move(tk)),
- mTracker(std::move(tracker)),
+ mTracker(tracker),
mTimerSlack(timerSlack),
mMinVsyncDistance(minVsyncDistance) {}
@@ -255,7 +255,7 @@
}
if (it != skipUpdateIt) {
- callback->update(*mTracker, now);
+ callback->update(mTracker, now);
}
auto const wakeupTime = *callback->wakeupTime();
if (!min || *min > wakeupTime) {
@@ -365,10 +365,10 @@
auto const rearmImminent = now > mIntendedWakeupTime;
if (CC_UNLIKELY(rearmImminent)) {
callback->addPendingWorkloadUpdate(scheduleTiming);
- return getExpectedCallbackTime(*mTracker, now, scheduleTiming);
+ return getExpectedCallbackTime(mTracker, now, scheduleTiming);
}
- const ScheduleResult result = callback->schedule(scheduleTiming, *mTracker, now);
+ const ScheduleResult result = callback->schedule(scheduleTiming, mTracker, now);
if (!result.has_value()) {
return {};
}
@@ -434,15 +434,15 @@
}
}
-VSyncCallbackRegistration::VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch> dispatch,
+VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncDispatch& dispatch,
VSyncDispatch::Callback callback,
std::string callbackName)
- : mDispatch(std::move(dispatch)),
- mToken(mDispatch->registerCallback(std::move(callback), std::move(callbackName))),
+ : mDispatch(dispatch),
+ mToken(dispatch.registerCallback(std::move(callback), std::move(callbackName))),
mValidToken(true) {}
VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncCallbackRegistration&& other)
- : mDispatch(std::move(other.mDispatch)),
+ : mDispatch(other.mDispatch),
mToken(std::move(other.mToken)),
mValidToken(std::move(other.mValidToken)) {
other.mValidToken = false;
@@ -457,28 +457,28 @@
}
VSyncCallbackRegistration::~VSyncCallbackRegistration() {
- if (mValidToken) mDispatch->unregisterCallback(mToken);
+ if (mValidToken) mDispatch.get().unregisterCallback(mToken);
}
ScheduleResult VSyncCallbackRegistration::schedule(VSyncDispatch::ScheduleTiming scheduleTiming) {
if (!mValidToken) {
return std::nullopt;
}
- return mDispatch->schedule(mToken, scheduleTiming);
+ return mDispatch.get().schedule(mToken, scheduleTiming);
}
ScheduleResult VSyncCallbackRegistration::update(VSyncDispatch::ScheduleTiming scheduleTiming) {
if (!mValidToken) {
return std::nullopt;
}
- return mDispatch->update(mToken, scheduleTiming);
+ return mDispatch.get().update(mToken, scheduleTiming);
}
CancelResult VSyncCallbackRegistration::cancel() {
if (!mValidToken) {
return CancelResult::Error;
}
- return mDispatch->cancel(mToken);
+ return mDispatch.get().cancel(mToken);
}
} // namespace android::scheduler
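The registration now holds a std::reference_wrapper<VSyncDispatch> rather than a shared_ptr. Because a reference_wrapper stays bound after being moved from, the mValidToken flag is what decides which object still owns the token and may unregister it. A compact stand-alone sketch of that pattern, using a hypothetical Dispatch/Registration pair rather than the real classes:

#include <cstdio>
#include <functional>
#include <utility>

struct Dispatch {
    int registerCallback() { return ++mNextToken; }
    void unregisterCallback(int token) { std::printf("unregister %d\n", token); }
    int mNextToken = 0;
};

class Registration {
public:
    explicit Registration(Dispatch& dispatch)
          : mDispatch(dispatch), mToken(dispatch.registerCallback()), mValid(true) {}

    Registration(Registration&& other)
          : mDispatch(other.mDispatch), mToken(other.mToken), mValid(other.mValid) {
        // The moved-from wrapper remains bound to the same Dispatch, so only
        // this flag marks it as no longer responsible for the token.
        other.mValid = false;
    }

    ~Registration() {
        if (mValid) mDispatch.get().unregisterCallback(mToken);
    }

private:
    std::reference_wrapper<Dispatch> mDispatch;
    int mToken;
    bool mValid;
};

int main() {
    Dispatch dispatch;
    Registration a(dispatch);
    Registration b(std::move(a));  // Only b unregisters; a was invalidated.
    return 0;
}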
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
index 6499d69..c3af136 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
@@ -26,11 +26,11 @@
#include <android-base/thread_annotations.h>
#include "VSyncDispatch.h"
-#include "VsyncSchedule.h"
namespace android::scheduler {
class TimeKeeper;
+class VSyncTracker;
// VSyncDispatchTimerQueueEntry is a helper class representing internal state for each entry in
// VSyncDispatchTimerQueue hoisted to public for unit testing.
@@ -120,8 +120,8 @@
// should be grouped into one wakeup.
// \param[in] minVsyncDistance The minimum distance between two vsync estimates before the
// vsyncs are considered the same vsync event.
- VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VsyncSchedule::TrackerPtr,
- nsecs_t timerSlack, nsecs_t minVsyncDistance);
+ VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VSyncTracker&, nsecs_t timerSlack,
+ nsecs_t minVsyncDistance);
~VSyncDispatchTimerQueue();
CallbackToken registerCallback(Callback, std::string callbackName) final;
@@ -148,7 +148,7 @@
static constexpr nsecs_t kInvalidTime = std::numeric_limits<int64_t>::max();
std::unique_ptr<TimeKeeper> const mTimeKeeper;
- VsyncSchedule::TrackerPtr mTracker;
+ VSyncTracker& mTracker;
nsecs_t const mTimerSlack;
nsecs_t const mMinVsyncDistance;
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index a3b8a56..02e12fd 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -31,7 +31,6 @@
#include <android-base/stringprintf.h>
#include <cutils/compiler.h>
#include <cutils/properties.h>
-#include <gui/TraceUtils.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -41,16 +40,14 @@
namespace android::scheduler {
using base::StringAppendF;
-using base::StringPrintf;
static auto constexpr kMaxPercent = 100u;
VSyncPredictor::~VSyncPredictor() = default;
-VSyncPredictor::VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
+VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
- : mName(name),
- mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
+ : mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
kHistorySize(historySize),
kMinimumSamplesForPrediction(minimumSamplesForPrediction),
kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
@@ -60,14 +57,12 @@
inline void VSyncPredictor::traceInt64If(const char* name, int64_t value) const {
if (CC_UNLIKELY(mTraceOn)) {
- traceInt64(name, value);
+ ATRACE_INT64(name, value);
}
}
inline void VSyncPredictor::traceInt64(const char* name, int64_t value) const {
- // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
- // avoid unnecessary allocations.
- ATRACE_INT64(StringPrintf("%s %s", name, mName.c_str()).c_str(), value);
+ ATRACE_INT64(name, value);
}
inline size_t VSyncPredictor::next(size_t i) const {
@@ -219,8 +214,8 @@
it->second = {anticipatedPeriod, intercept};
- ALOGV("model update ts %s: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, mName.c_str(),
- timestamp, anticipatedPeriod, intercept);
+ ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
+ anticipatedPeriod, intercept);
return true;
}
@@ -332,7 +327,7 @@
}
void VSyncPredictor::setDivisor(unsigned divisor) {
- ALOGV("%s %s: %d", __func__, mName.c_str(), divisor);
+ ALOGV("%s: %d", __func__, divisor);
std::lock_guard lock(mMutex);
mDivisor = divisor;
}
@@ -348,7 +343,7 @@
}
void VSyncPredictor::setPeriod(nsecs_t period) {
- ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+ ATRACE_CALL();
traceInt64("VSP-setPeriod", period);
std::lock_guard lock(mMutex);
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 1ded54f..305cdb0 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -29,15 +29,14 @@
class VSyncPredictor : public VSyncTracker {
public:
/*
- * \param [in] name The name of the display this corresponds to.
* \param [in] idealPeriod The initial ideal period to use.
* \param [in] historySize The internal amount of entries to store in the model.
* \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
* predicting. \param [in] outlierTolerancePercent a number 0 to 100 that will be used to filter
* samples that fall outlierTolerancePercent from an anticipated vsync event.
*/
- VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
- size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent);
+ VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
+ uint32_t outlierTolerancePercent);
~VSyncPredictor();
bool addVsyncTimestamp(nsecs_t timestamp) final EXCLUDES(mMutex);
@@ -77,8 +76,6 @@
VSyncPredictor& operator=(VSyncPredictor const&) = delete;
void clearTimestamps() REQUIRES(mMutex);
- const std::string mName;
-
inline void traceInt64If(const char* name, int64_t value) const;
inline void traceInt64(const char* name, int64_t value) const;
bool const mTraceOn;
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index a831f66..b5f212e 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -21,7 +21,6 @@
#include <assert.h>
#include <cutils/properties.h>
-#include <gui/TraceUtils.h>
#include <log/log.h>
#include <utils/Trace.h>
@@ -33,7 +32,6 @@
namespace android::scheduler {
using base::StringAppendF;
-using base::StringPrintf;
VsyncController::~VsyncController() = default;
@@ -41,12 +39,12 @@
return systemTime(SYSTEM_TIME_MONOTONIC);
}
-VSyncReactor::VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
+VSyncReactor::VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker,
size_t pendingFenceLimit, bool supportKernelIdleTimer)
- : mName(name),
- mClock(std::move(clock)),
+ : mClock(std::move(clock)),
mTracker(tracker),
mPendingLimit(pendingFenceLimit),
+ // TODO(adyabr): change mSupportKernelIdleTimer when the active display changes
mSupportKernelIdleTimer(supportKernelIdleTimer) {}
VSyncReactor::~VSyncReactor() = default;
@@ -116,7 +114,7 @@
}
void VSyncReactor::startPeriodTransitionInternal(nsecs_t newPeriod) {
- ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+ ATRACE_CALL();
mPeriodConfirmationInProgress = true;
mPeriodTransitioningTo = newPeriod;
mMoreSamplesNeeded = true;
@@ -124,20 +122,18 @@
}
void VSyncReactor::endPeriodTransition() {
- ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+ ATRACE_CALL();
mPeriodTransitioningTo.reset();
mPeriodConfirmationInProgress = false;
mLastHwVsync.reset();
}
-void VSyncReactor::startPeriodTransition(nsecs_t period, bool force) {
- // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
- // avoid unnecessary allocations.
- ATRACE_INT64(StringPrintf("VSR-startPeriodTransition %s", mName.c_str()).c_str(), period);
+void VSyncReactor::startPeriodTransition(nsecs_t period) {
+ ATRACE_INT64("VSR-startPeriodTransition", period);
std::lock_guard lock(mMutex);
mLastHwVsync.reset();
- if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod() && !force) {
+ if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod()) {
endPeriodTransition();
setIgnorePresentFencesInternal(false);
mMoreSamplesNeeded = false;
@@ -185,7 +181,7 @@
std::lock_guard lock(mMutex);
if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
- ATRACE_FORMAT("VSR %s: period confirmed", mName.c_str());
+ ATRACE_NAME("VSR: period confirmed");
if (mPeriodTransitioningTo) {
mTracker.setPeriod(*mPeriodTransitioningTo);
*periodFlushed = true;
@@ -199,12 +195,12 @@
endPeriodTransition();
mMoreSamplesNeeded = mTracker.needsMoreSamples();
} else if (mPeriodConfirmationInProgress) {
- ATRACE_FORMAT("VSR %s: still confirming period", mName.c_str());
+ ATRACE_NAME("VSR: still confirming period");
mLastHwVsync = timestamp;
mMoreSamplesNeeded = true;
*periodFlushed = false;
} else {
- ATRACE_FORMAT("VSR %s: adding sample", mName.c_str());
+ ATRACE_NAME("VSR: adding sample");
*periodFlushed = false;
mTracker.addVsyncTimestamp(timestamp);
mMoreSamplesNeeded = mTracker.needsMoreSamples();
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.h b/services/surfaceflinger/Scheduler/VSyncReactor.h
index fd9ca42..4501487 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.h
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.h
@@ -22,7 +22,6 @@
#include <vector>
#include <android-base/thread_annotations.h>
-#include <ui/DisplayId.h>
#include <ui/FenceTime.h>
#include <scheduler/TimeKeeper.h>
@@ -38,14 +37,14 @@
// TODO (b/145217110): consider renaming.
class VSyncReactor : public VsyncController {
public:
- VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
- size_t pendingFenceLimit, bool supportKernelIdleTimer);
+ VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker, size_t pendingFenceLimit,
+ bool supportKernelIdleTimer);
~VSyncReactor();
bool addPresentFence(std::shared_ptr<FenceTime>) final;
void setIgnorePresentFences(bool ignore) final;
- void startPeriodTransition(nsecs_t period, bool force) final;
+ void startPeriodTransition(nsecs_t period) final;
bool addHwVsyncTimestamp(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
bool* periodFlushed) final;
@@ -62,7 +61,6 @@
bool periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> hwcVsyncPeriod)
REQUIRES(mMutex);
- const std::string mName;
std::unique_ptr<Clock> const mClock;
VSyncTracker& mTracker;
size_t const mPendingLimit;
diff --git a/services/surfaceflinger/Scheduler/VsyncController.h b/services/surfaceflinger/Scheduler/VsyncController.h
index 9177899..726a420 100644
--- a/services/surfaceflinger/Scheduler/VsyncController.h
+++ b/services/surfaceflinger/Scheduler/VsyncController.h
@@ -63,9 +63,8 @@
* itself. The controller will end the period transition internally.
*
* \param [in] period The period that the system is changing into.
- * \param [in] force True to recalibrate even if period matches the existing period.
*/
- virtual void startPeriodTransition(nsecs_t period, bool force) = 0;
+ virtual void startPeriodTransition(nsecs_t period) = 0;
/*
* Tells the tracker to stop using present fences to get a vsync signal.
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
index 951c1ec..95bc31f 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
@@ -21,9 +21,6 @@
#include "VsyncSchedule.h"
-#include "ISchedulerCallback.h"
-#include "Scheduler.h"
-#include "Utils/Dumper.h"
#include "VSyncDispatchTimerQueue.h"
#include "VSyncPredictor.h"
#include "VSyncReactor.h"
@@ -42,8 +39,8 @@
}
public:
- explicit PredictedVsyncTracer(std::shared_ptr<VsyncDispatch> dispatch)
- : mRegistration(std::move(dispatch), makeVsyncCallback(), __func__) {
+ explicit PredictedVsyncTracer(VsyncDispatch& dispatch)
+ : mRegistration(dispatch, makeVsyncCallback(), __func__) {
schedule();
}
@@ -54,23 +51,21 @@
VSyncCallbackRegistration mRegistration;
};
-VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, FeatureFlags features)
- : mId(id),
- mTracker(createTracker(id)),
- mDispatch(createDispatch(mTracker)),
- mController(createController(id, *mTracker, features)) {
+VsyncSchedule::VsyncSchedule(FeatureFlags features)
+ : mTracker(createTracker()),
+ mDispatch(createDispatch(*mTracker)),
+ mController(createController(*mTracker, features)) {
if (features.test(Feature::kTracePredictedVsync)) {
- mTracer = std::make_unique<PredictedVsyncTracer>(mDispatch);
+ mTracer = std::make_unique<PredictedVsyncTracer>(*mDispatch);
}
}
-VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, TrackerPtr tracker, DispatchPtr dispatch,
- ControllerPtr controller)
- : mId(id),
- mTracker(std::move(tracker)),
+VsyncSchedule::VsyncSchedule(TrackerPtr tracker, DispatchPtr dispatch, ControllerPtr controller)
+ : mTracker(std::move(tracker)),
mDispatch(std::move(dispatch)),
mController(std::move(controller)) {}
+VsyncSchedule::VsyncSchedule(VsyncSchedule&&) = default;
VsyncSchedule::~VsyncSchedule() = default;
Period VsyncSchedule::period() const {
@@ -82,13 +77,6 @@
}
void VsyncSchedule::dump(std::string& out) const {
- utils::Dumper dumper(out);
- {
- std::lock_guard<std::mutex> lock(mHwVsyncLock);
- dumper.dump("hwVsyncState", ftl::enum_string(mHwVsyncState));
- dumper.dump("lastHwVsyncState", ftl::enum_string(mLastHwVsyncState));
- }
-
out.append("VsyncController:\n");
mController->dump(out);
@@ -96,72 +84,40 @@
mDispatch->dump(out);
}
-VsyncSchedule::TrackerPtr VsyncSchedule::createTracker(PhysicalDisplayId id) {
+VsyncSchedule::TrackerPtr VsyncSchedule::createTracker() {
// TODO(b/144707443): Tune constants.
constexpr nsecs_t kInitialPeriod = (60_Hz).getPeriodNsecs();
constexpr size_t kHistorySize = 20;
constexpr size_t kMinSamplesForPrediction = 6;
constexpr uint32_t kDiscardOutlierPercent = 20;
- return std::make_unique<VSyncPredictor>(to_string(id), kInitialPeriod, kHistorySize,
- kMinSamplesForPrediction, kDiscardOutlierPercent);
+ return std::make_unique<VSyncPredictor>(kInitialPeriod, kHistorySize, kMinSamplesForPrediction,
+ kDiscardOutlierPercent);
}
-VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(TrackerPtr tracker) {
+VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(VsyncTracker& tracker) {
using namespace std::chrono_literals;
// TODO(b/144707443): Tune constants.
constexpr std::chrono::nanoseconds kGroupDispatchWithin = 500us;
constexpr std::chrono::nanoseconds kSnapToSameVsyncWithin = 3ms;
- return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), std::move(tracker),
+ return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
kGroupDispatchWithin.count(),
kSnapToSameVsyncWithin.count());
}
-VsyncSchedule::ControllerPtr VsyncSchedule::createController(PhysicalDisplayId id,
- VsyncTracker& tracker,
+VsyncSchedule::ControllerPtr VsyncSchedule::createController(VsyncTracker& tracker,
FeatureFlags features) {
// TODO(b/144707443): Tune constants.
constexpr size_t kMaxPendingFences = 20;
const bool hasKernelIdleTimer = features.test(Feature::kKernelIdleTimer);
- auto reactor = std::make_unique<VSyncReactor>(to_string(id), std::make_unique<SystemClock>(),
- tracker, kMaxPendingFences, hasKernelIdleTimer);
+ auto reactor = std::make_unique<VSyncReactor>(std::make_unique<SystemClock>(), tracker,
+ kMaxPendingFences, hasKernelIdleTimer);
reactor->setIgnorePresentFences(!features.test(Feature::kPresentFences));
return reactor;
}
-void VsyncSchedule::enableHardwareVsync(ISchedulerCallback& callback) {
- std::lock_guard<std::mutex> lock(mHwVsyncLock);
- if (mHwVsyncState == HwVsyncState::Disabled) {
- getTracker().resetModel();
- callback.setVsyncEnabled(mId, true);
- mHwVsyncState = HwVsyncState::Enabled;
- mLastHwVsyncState = HwVsyncState::Enabled;
- }
-}
-
-void VsyncSchedule::disableHardwareVsync(ISchedulerCallback& callback, bool disallow) {
- std::lock_guard<std::mutex> lock(mHwVsyncLock);
- if (mHwVsyncState == HwVsyncState::Enabled) {
- callback.setVsyncEnabled(mId, false);
- mLastHwVsyncState = HwVsyncState::Disabled;
- }
- mHwVsyncState = disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
-}
-
-bool VsyncSchedule::isHardwareVsyncAllowed() const {
- std::lock_guard<std::mutex> lock(mHwVsyncLock);
- return mHwVsyncState != HwVsyncState::Disallowed;
-}
-
-void VsyncSchedule::allowHardwareVsync() {
- std::lock_guard<std::mutex> lock(mHwVsyncLock);
- if (mHwVsyncState == HwVsyncState::Disallowed) {
- mHwVsyncState = HwVsyncState::Disabled;
- }
-}
-
} // namespace android::scheduler
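For reference, the enableHardwareVsync/disableHardwareVsync/allowHardwareVsync methods removed above implement a three-state gate; the revert tracks equivalent booleans on Scheduler instead. A compressed sketch of just that state machine (HwVsyncGate is an illustrative name, and the HAL/tracker calls are omitted):

enum class HwVsyncState { Enabled, Disabled, Disallowed };

struct HwVsyncGate {
    HwVsyncState state = HwVsyncState::Disallowed;

    // Returns true when the caller should actually turn vsync on at the HAL;
    // a Disallowed or already-Enabled gate ignores the request.
    bool enable() {
        if (state != HwVsyncState::Disabled) return false;
        state = HwVsyncState::Enabled;
        return true;
    }

    // Disabling with disallow=true blocks future enable() calls until allow().
    void disable(bool disallow) {
        state = disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
    }

    void allow() {
        if (state == HwVsyncState::Disallowed) state = HwVsyncState::Disabled;
    }
};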
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.h b/services/surfaceflinger/Scheduler/VsyncSchedule.h
index ffb7ad5..173b1d0 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.h
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.h
@@ -19,10 +19,8 @@
#include <memory>
#include <string>
-#include <ftl/enum.h>
#include <scheduler/Features.h>
#include <scheduler/Time.h>
-#include <ui/DisplayId.h>
namespace android {
class EventThreadTest;
@@ -34,8 +32,6 @@
namespace android::scheduler {
-struct ISchedulerCallback;
-
// TODO(b/185535769): Rename classes, and remove aliases.
class VSyncDispatch;
class VSyncTracker;
@@ -47,7 +43,8 @@
// Schedule that synchronizes to hardware VSYNC of a physical display.
class VsyncSchedule {
public:
- VsyncSchedule(PhysicalDisplayId, FeatureFlags);
+ explicit VsyncSchedule(FeatureFlags);
+ VsyncSchedule(VsyncSchedule&&);
~VsyncSchedule();
Period period() const;
@@ -58,71 +55,30 @@
VsyncTracker& getTracker() { return *mTracker; }
VsyncController& getController() { return *mController; }
- // TODO(b/185535769): Once these are hidden behind the API, they may no
- // longer need to be shared_ptrs.
- using DispatchPtr = std::shared_ptr<VsyncDispatch>;
- using TrackerPtr = std::shared_ptr<VsyncTracker>;
-
// TODO(b/185535769): Remove once VsyncSchedule owns all registrations.
- DispatchPtr getDispatch() { return mDispatch; }
+ VsyncDispatch& getDispatch() { return *mDispatch; }
void dump(std::string&) const;
- // Turn on hardware vsyncs, unless mHwVsyncState is Disallowed, in which
- // case this call is ignored.
- void enableHardwareVsync(ISchedulerCallback&) EXCLUDES(mHwVsyncLock);
-
- // Disable hardware vsyncs. If `disallow` is true, future calls to
- // enableHardwareVsync are ineffective until allowHardwareVsync is called.
- void disableHardwareVsync(ISchedulerCallback&, bool disallow) EXCLUDES(mHwVsyncLock);
-
- // Restore the ability to enable hardware vsync.
- void allowHardwareVsync() EXCLUDES(mHwVsyncLock);
-
- // If true, enableHardwareVsync can enable hardware vsync (if not already
- // enabled). If false, enableHardwareVsync does nothing.
- bool isHardwareVsyncAllowed() const EXCLUDES(mHwVsyncLock);
-
-protected:
- using ControllerPtr = std::unique_ptr<VsyncController>;
-
- // For tests.
- VsyncSchedule(PhysicalDisplayId, TrackerPtr, DispatchPtr, ControllerPtr);
-
private:
friend class TestableScheduler;
friend class android::EventThreadTest;
friend class android::fuzz::SchedulerFuzzer;
- static TrackerPtr createTracker(PhysicalDisplayId);
- static DispatchPtr createDispatch(TrackerPtr);
- static ControllerPtr createController(PhysicalDisplayId, VsyncTracker&, FeatureFlags);
+ using TrackerPtr = std::unique_ptr<VsyncTracker>;
+ using DispatchPtr = std::unique_ptr<VsyncDispatch>;
+ using ControllerPtr = std::unique_ptr<VsyncController>;
- mutable std::mutex mHwVsyncLock;
- enum class HwVsyncState {
- // Hardware vsyncs are currently enabled.
- Enabled,
+ // For tests.
+ VsyncSchedule(TrackerPtr, DispatchPtr, ControllerPtr);
- // Hardware vsyncs are currently disabled. They can be enabled by a call
- // to `enableHardwareVsync`.
- Disabled,
-
- // Hardware vsyncs are not currently allowed (e.g. because the display
- // is off).
- Disallowed,
-
- ftl_last = Disallowed,
- };
- HwVsyncState mHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disallowed;
-
- // The last state, which may be the current state, or the state prior to setting to Disallowed.
- HwVsyncState mLastHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disabled;
+ static TrackerPtr createTracker();
+ static DispatchPtr createDispatch(VsyncTracker&);
+ static ControllerPtr createController(VsyncTracker&, FeatureFlags);
class PredictedVsyncTracer;
using TracerPtr = std::unique_ptr<PredictedVsyncTracer>;
- const PhysicalDisplayId mId;
-
// Effectively const except in move constructor.
TrackerPtr mTracker;
DispatchPtr mDispatch;
diff --git a/services/surfaceflinger/Scheduler/include/scheduler/Time.h b/services/surfaceflinger/Scheduler/include/scheduler/Time.h
index bd4e3c2..ba1459a 100644
--- a/services/surfaceflinger/Scheduler/include/scheduler/Time.h
+++ b/services/surfaceflinger/Scheduler/include/scheduler/Time.h
@@ -26,7 +26,7 @@
namespace scheduler {
// TODO(b/185535769): Pull Clock.h to libscheduler to reuse this.
-using SchedulerClock = std::chrono::high_resolution_clock;
+using SchedulerClock = std::chrono::steady_clock;
static_assert(SchedulerClock::is_steady);
} // namespace scheduler
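The clock swap above matters because std::chrono::high_resolution_clock is allowed to be an alias for system_clock (as it is in libstdc++), whose is_steady is false, so the static_assert is only guaranteed to hold with steady_clock. A minimal illustration:

#include <chrono>

using SchedulerClock = std::chrono::steady_clock;
static_assert(SchedulerClock::is_steady, "scheduler timestamps must be monotonic");

// Elapsed time that cannot go backwards when the wall clock is adjusted.
inline std::chrono::nanoseconds elapsedSince(SchedulerClock::time_point start) {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(SchedulerClock::now() - start);
}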
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index a0c3eb0..68ab776 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1130,33 +1130,21 @@
return NO_ERROR;
}
-status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>& displayToken,
- DisplayStatInfo* outStats) {
+status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>&, DisplayStatInfo* outStats) {
if (!outStats) {
return BAD_VALUE;
}
- std::optional<PhysicalDisplayId> displayIdOpt;
- {
- Mutex::Autolock lock(mStateLock);
- displayIdOpt = getPhysicalDisplayIdLocked(displayToken);
- }
-
- if (!displayIdOpt) {
- ALOGE("%s: Invalid physical display token %p", __func__, displayToken.get());
- return NAME_NOT_FOUND;
- }
- const auto schedule = mScheduler->getVsyncSchedule(displayIdOpt);
- outStats->vsyncTime = schedule->vsyncDeadlineAfter(TimePoint::now()).ns();
- outStats->vsyncPeriod = schedule->period().ns();
+ const auto& schedule = mScheduler->getVsyncSchedule();
+ outStats->vsyncTime = schedule.vsyncDeadlineAfter(TimePoint::now()).ns();
+ outStats->vsyncPeriod = schedule.period().ns();
return NO_ERROR;
}
void SurfaceFlinger::setDesiredActiveMode(display::DisplayModeRequest&& request, bool force) {
ATRACE_CALL();
- const auto displayId = request.mode.modePtr->getPhysicalDisplayId();
- const auto display = getDisplayDeviceLocked(displayId);
+ auto display = getDisplayDeviceLocked(request.mode.modePtr->getPhysicalDisplayId());
if (!display) {
ALOGW("%s: display is no longer valid", __func__);
return;
@@ -1169,25 +1157,23 @@
force)) {
case DisplayDevice::DesiredActiveModeAction::InitiateDisplayModeSwitch:
// Set the render rate as setDesiredActiveMode updated it.
- mScheduler->setRenderRate(displayId,
- display->refreshRateSelector().getActiveMode().fps);
+ mScheduler->setRenderRate(display->refreshRateSelector().getActiveMode().fps);
// Schedule a new frame to initiate the display mode switch.
scheduleComposite(FrameHint::kNone);
// Start receiving vsync samples now, so that we can detect a period
// switch.
- mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */,
- mode.modePtr->getFps());
-
+ mScheduler->resyncToHardwareVsync(true, mode.modePtr->getFps());
// As we called to set period, we will call to onRefreshRateChangeCompleted once
// VsyncController model is locked.
- mScheduler->modulateVsync(displayId, &VsyncModulator::onRefreshRateChangeInitiated);
+ mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeInitiated);
+
updatePhaseConfiguration(mode.fps);
mScheduler->setModeChangePending(true);
break;
case DisplayDevice::DesiredActiveModeAction::InitiateRenderRateSwitch:
- mScheduler->setRenderRate(displayId, mode.fps);
+ mScheduler->setRenderRate(mode.fps);
updatePhaseConfiguration(mode.fps);
mRefreshRateStats->setRefreshRate(mode.fps);
if (display->getPhysicalId() == mActiveDisplayId && emitEvent) {
@@ -1303,14 +1289,11 @@
}
void SurfaceFlinger::desiredActiveModeChangeDone(const sp<DisplayDevice>& display) {
- const auto desiredActiveMode = display->getDesiredActiveMode();
- const auto& modeOpt = desiredActiveMode->modeOpt;
- const auto displayId = modeOpt->modePtr->getPhysicalDisplayId();
- const auto displayFps = modeOpt->modePtr->getFps();
- const auto renderFps = modeOpt->fps;
+ const auto displayFps = display->getDesiredActiveMode()->modeOpt->modePtr->getFps();
+ const auto renderFps = display->getDesiredActiveMode()->modeOpt->fps;
clearDesiredActiveModeState(display);
- mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, displayFps);
- mScheduler->setRenderRate(displayId, renderFps);
+ mScheduler->resyncToHardwareVsync(true, displayFps);
+ mScheduler->setRenderRate(renderFps);
updatePhaseConfiguration(renderFps);
}
@@ -2047,11 +2030,17 @@
ATRACE_FORMAT("onComposerHalVsync%s", tracePeriod.c_str());
Mutex::Autolock lock(mStateLock);
- if (const auto displayIdOpt = getHwComposer().onVsync(hwcDisplayId, timestamp)) {
- if (mScheduler->addResyncSample(*displayIdOpt, timestamp, vsyncPeriod)) {
- // period flushed
- mScheduler->modulateVsync(displayIdOpt, &VsyncModulator::onRefreshRateChangeCompleted);
- }
+
+ if (const auto displayIdOpt = getHwComposer().onVsync(hwcDisplayId, timestamp);
+ displayIdOpt != mActiveDisplayId) {
+ // Ignore VSYNC for invalid/inactive displays.
+ return;
+ }
+
+ bool periodFlushed = false;
+ mScheduler->addResyncSample(timestamp, vsyncPeriod, &periodFlushed);
+ if (periodFlushed) {
+ mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeCompleted);
}
}
@@ -2092,15 +2081,16 @@
mScheduler->forceNextResync();
}
-void SurfaceFlinger::setVsyncEnabled(PhysicalDisplayId id, bool enabled) {
- const char* const whence = __func__;
- ATRACE_FORMAT("%s (%d) for %" PRIu64, whence, enabled, id.value);
+void SurfaceFlinger::setVsyncEnabled(bool enabled) {
+ ATRACE_CALL();
// On main thread to avoid race conditions with display power state.
static_cast<void>(mScheduler->schedule([=]() FTL_FAKE_GUARD(mStateLock) {
- ATRACE_FORMAT("%s (%d) for %" PRIu64 " (main thread)", whence, enabled, id.value);
- if (const auto display = getDisplayDeviceLocked(id); display && display->isPoweredOn()) {
- setHWCVsyncEnabled(id, enabled);
+ mHWCVsyncPendingState = enabled ? hal::Vsync::ENABLE : hal::Vsync::DISABLE;
+
+ if (const auto display = getDefaultDisplayDeviceLocked();
+ display && display->isPoweredOn()) {
+ setHWCVsyncEnabled(display->getPhysicalId(), mHWCVsyncPendingState);
}
}));
}
@@ -2127,13 +2117,13 @@
TimePoint SurfaceFlinger::calculateExpectedPresentTime(TimePoint frameTime) const {
const auto& schedule = mScheduler->getVsyncSchedule();
- const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(frameTime);
+ const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(frameTime);
if (mScheduler->vsyncModulator().getVsyncConfig().sfOffset > 0) {
return vsyncDeadline;
}
// Inflate the expected present time if we're targeting the next vsync.
- return vsyncDeadline + schedule->period();
+ return vsyncDeadline + schedule.period();
}
void SurfaceFlinger::configure() FTL_FAKE_GUARD(kMainThreadContext) {
@@ -2264,7 +2254,7 @@
ticks<std::milli, float>(mExpectedPresentTime - TimePoint::now()),
mExpectedPresentTime == expectedVsyncTime ? "" : " (adjusted)");
- const Period vsyncPeriod = mScheduler->getVsyncSchedule()->period();
+ const Period vsyncPeriod = mScheduler->getVsyncSchedule().period();
const FenceTimePtr& previousPresentFence = getPreviousPresentFence(frameTime, vsyncPeriod);
// When backpressure propagation is enabled, we want to give a small grace period of 1ms
@@ -2514,7 +2504,7 @@
refreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::milliseconds(mDebugFlashDelay);
}
- const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule()->period();
+ const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule().period();
const auto hwcMinWorkDuration = mVsyncConfiguration->getCurrentConfigs().hwcMinWorkDuration;
refreshArgs.earliestPresentTime = prevVsyncTime - hwcMinWorkDuration;
@@ -2596,7 +2586,7 @@
// TODO(b/160583065): Enable skip validation when SF caches all client composition layers.
const bool hasGpuUseOrReuse =
mCompositionCoverage.any(CompositionCoverage::Gpu | CompositionCoverage::GpuReuse);
- mScheduler->modulateVsync({}, &VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
+ mScheduler->modulateVsync(&VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
mLayersWithQueuedFrames.clear();
if (mLayerTracingEnabled && mLayerTracing.flagIsSet(LayerTracing::TRACE_COMPOSITION)) {
@@ -2740,9 +2730,9 @@
? mPresentLatencyTracker.trackPendingFrame(compositeTime, presentFenceTime)
: Duration::zero();
- const auto schedule = mScheduler->getVsyncSchedule();
- const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(presentTime);
- const Period vsyncPeriod = schedule->period();
+ const auto& schedule = mScheduler->getVsyncSchedule();
+ const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(presentTime);
+ const Period vsyncPeriod = schedule.period();
const nsecs_t vsyncPhase = mVsyncConfiguration->getCurrentConfigs().late.sfOffset;
const CompositorTiming compositorTiming(vsyncDeadline.ns(), vsyncPeriod.ns(), vsyncPhase,
@@ -2817,19 +2807,15 @@
mTimeStats->incrementTotalFrames();
mTimeStats->setPresentFenceGlobal(presentFenceTime);
- {
- ftl::FakeGuard guard(mStateLock);
- for (const auto& [id, physicalDisplay] : mPhysicalDisplays) {
- if (auto displayDevice = getDisplayDeviceLocked(id);
- displayDevice && displayDevice->isPoweredOn() && physicalDisplay.isInternal()) {
- auto presentFenceTimeI = defaultDisplay && defaultDisplay->getPhysicalId() == id
- ? std::move(presentFenceTime)
- : std::make_shared<FenceTime>(getHwComposer().getPresentFence(id));
- if (presentFenceTimeI->isValid()) {
- mScheduler->addPresentFence(id, std::move(presentFenceTimeI));
- }
- }
- }
+ const bool isInternalDisplay = defaultDisplay &&
+ FTL_FAKE_GUARD(mStateLock, mPhysicalDisplays)
+ .get(defaultDisplay->getPhysicalId())
+ .transform(&PhysicalDisplay::isInternal)
+ .value_or(false);
+
+ if (isInternalDisplay && defaultDisplay && defaultDisplay->getPowerMode() == hal::PowerMode::ON &&
+ presentFenceTime->isValid()) {
+ mScheduler->addPresentFence(std::move(presentFenceTime));
}
const bool isDisplayConnected =
@@ -2837,7 +2823,7 @@
if (!hasSyncFramework) {
if (isDisplayConnected && defaultDisplay->isPoweredOn()) {
- mScheduler->enableHardwareVsync(defaultDisplay->getPhysicalId());
+ mScheduler->enableHardwareVsync();
}
}
@@ -2948,7 +2934,7 @@
// so we can call commitTransactionsLocked unconditionally.
// We clear the flags with mStateLock held to guarantee that
// mCurrentState won't change until the transaction is committed.
- mScheduler->modulateVsync({}, &VsyncModulator::onTransactionCommit);
+ mScheduler->modulateVsync(&VsyncModulator::onTransactionCommit);
commitTransactionsLocked(clearTransactionFlags(eTransactionMask));
mDebugInTransaction = 0;
@@ -3787,9 +3773,10 @@
mScheduler = std::make_unique<Scheduler>(static_cast<ICompositor&>(*this),
static_cast<ISchedulerCallback&>(*this), features,
std::move(modulatorPtr));
+ mScheduler->createVsyncSchedule(features);
mScheduler->registerDisplay(display->getPhysicalId(), display->holdRefreshRateSelector());
- setVsyncEnabled(display->getPhysicalId(), false);
+ setVsyncEnabled(false);
mScheduler->startTimers();
const auto configs = mVsyncConfiguration->getCurrentConfigs();
@@ -3805,7 +3792,7 @@
/* workDuration */ activeRefreshRate.getPeriod(),
/* readyDuration */ configs.late.sfWorkDuration);
- mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(),
+ mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(),
*mFrameTimeline->getTokenManager(), configs.late.sfWorkDuration);
mRegionSamplingThread =
@@ -4019,7 +4006,7 @@
void SurfaceFlinger::setTransactionFlags(uint32_t mask, TransactionSchedule schedule,
const sp<IBinder>& applyToken, FrameHint frameHint) {
- mScheduler->modulateVsync({}, &VsyncModulator::setTransactionSchedule, schedule, applyToken);
+ mScheduler->modulateVsync(&VsyncModulator::setTransactionSchedule, schedule, applyToken);
uint32_t transactionFlags = mTransactionFlags.fetch_or(mask);
ATRACE_INT("mTransactionFlags", transactionFlags);
@@ -4048,7 +4035,7 @@
return TransactionReadiness::NotReady;
}
- if (!mScheduler->isVsyncTargetForUid(mExpectedPresentTime, transaction.originUid)) {
+ if (!mScheduler->isVsyncValid(mExpectedPresentTime, transaction.originUid)) {
ATRACE_NAME("!isVsyncValid");
return TransactionReadiness::NotReady;
}
@@ -4192,7 +4179,7 @@
return false;
}
- const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule()->period() / 2;
+ const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule().period() / 2;
return predictedPresentTime >= expectedPresentTime &&
predictedPresentTime - expectedPresentTime >= earlyLatchVsyncThreshold;
@@ -4565,8 +4552,10 @@
}
if (layer == nullptr) {
for (auto& [listener, callbackIds] : s.listeners) {
- mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
- sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+ mTransactionCallbackInvoker.addCallbackHandle(sp<CallbackHandle>::make(listener,
+ callbackIds,
+ s.surface),
+ std::vector<JankData>());
}
return 0;
}
@@ -4843,6 +4832,10 @@
s.trustedPresentationListener);
}
+ if (what & layer_state_t::eFlushJankData) {
+ // Do nothing. Processing the transaction completed listeners currently causes the flush.
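+ // (No-op branch kept so the eFlushJankData bit is consumed like other state bits.)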
+ }
+
if (layer->setTransactionCompletedListeners(callbackHandles,
layer->willPresentCurrentTransaction())) {
flags |= eTraversalNeeded;
@@ -4900,8 +4893,10 @@
}
if (layer == nullptr) {
for (auto& [listener, callbackIds] : s.listeners) {
- mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
- sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+ mTransactionCallbackInvoker.addCallbackHandle(sp<CallbackHandle>::make(listener,
+ callbackIds,
+ s.surface),
+ std::vector<JankData>());
}
return 0;
}
@@ -5233,11 +5228,10 @@
ALOGW("Couldn't set SCHED_FIFO on display on: %s\n", strerror(errno));
}
getHwComposer().setPowerMode(displayId, mode);
- if (mode != hal::PowerMode::DOZE_SUSPEND) {
- if (isActiveDisplay) {
- mScheduler->onScreenAcquired(mAppConnectionHandle);
- }
- mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
+ if (isActiveDisplay && mode != hal::PowerMode::DOZE_SUSPEND) {
+ setHWCVsyncEnabled(displayId, mHWCVsyncPendingState);
+ mScheduler->onScreenAcquired(mAppConnectionHandle);
+ mScheduler->resyncToHardwareVsync(true, refreshRate);
}
mVisibleRegionsDirty = true;
@@ -5250,34 +5244,33 @@
if (SurfaceFlinger::setSchedAttr(false) != NO_ERROR) {
ALOGW("Couldn't set uclamp.min on display off: %s\n", strerror(errno));
}
- if (*currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
- mScheduler->disableHardwareVsync(displayId, true);
- if (isActiveDisplay) {
- mScheduler->onScreenReleased(mAppConnectionHandle);
- }
+ if (isActiveDisplay && *currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
+ mScheduler->disableHardwareVsync(true);
+ mScheduler->onScreenReleased(mAppConnectionHandle);
}
+ // Make sure HWVsync is disabled before turning off the display
+ setHWCVsyncEnabled(displayId, hal::Vsync::DISABLE);
+
getHwComposer().setPowerMode(displayId, mode);
mVisibleRegionsDirty = true;
// from this point on, SF will stop drawing on this display
} else if (mode == hal::PowerMode::DOZE || mode == hal::PowerMode::ON) {
// Update display while dozing
getHwComposer().setPowerMode(displayId, mode);
- if (*currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
- if (isActiveDisplay) {
- mScheduler->onScreenAcquired(mAppConnectionHandle);
- }
+ if (isActiveDisplay && *currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
ALOGI("Force repainting for DOZE_SUSPEND -> DOZE or ON.");
mVisibleRegionsDirty = true;
scheduleRepaint();
- mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
+ mScheduler->onScreenAcquired(mAppConnectionHandle);
+ mScheduler->resyncToHardwareVsync(true, refreshRate);
}
} else if (mode == hal::PowerMode::DOZE_SUSPEND) {
// Leave display going to doze
if (isActiveDisplay) {
+ mScheduler->disableHardwareVsync(true);
mScheduler->onScreenReleased(mAppConnectionHandle);
}
- mScheduler->disableHardwareVsync(displayId, true);
getHwComposer().setPowerMode(displayId, mode);
} else {
ALOGE("Attempting to set unknown power mode: %d\n", mode);
@@ -5287,8 +5280,8 @@
if (isActiveDisplay) {
mTimeStats->setPowerMode(mode);
mRefreshRateStats->setPowerMode(mode);
+ mScheduler->setDisplayPowerMode(mode);
}
- mScheduler->setDisplayPowerMode(displayId, mode);
ALOGD("Finished setting power mode %d on display %s", mode, to_string(displayId).c_str());
}
@@ -5466,6 +5459,14 @@
mScheduler->dump(dumper);
+ // TODO(b/241286146): Move to Scheduler.
+ {
+ utils::Dumper::Indent indent(dumper);
+ dumper.dump("lastHwcVsyncState"sv, mLastHWCVsyncState);
+ dumper.dump("pendingHwcVsyncState"sv, mHWCVsyncPendingState);
+ }
+ dumper.eol();
+
// TODO(b/241285876): Move to DisplayModeController.
dumper.dump("debugDisplayModeSetByBackdoor"sv, mDebugDisplayModeSetByBackdoor);
dumper.eol();
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 0bd15dc..5b8038b 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -76,7 +76,6 @@
#include "FrontEnd/LayerSnapshotBuilder.h"
#include "FrontEnd/TransactionHandler.h"
#include "LayerVector.h"
-#include "Scheduler/ISchedulerCallback.h"
#include "Scheduler/RefreshRateSelector.h"
#include "Scheduler/RefreshRateStats.h"
#include "Scheduler/Scheduler.h"
@@ -642,7 +641,7 @@
// Toggles hardware VSYNC by calling into HWC.
// TODO(b/241286146): Rename for self-explanatory API.
- void setVsyncEnabled(PhysicalDisplayId, bool) override;
+ void setVsyncEnabled(bool) override;
void requestDisplayModes(std::vector<display::DisplayModeRequest>) override;
void kernelTimerChanged(bool expired) override;
void triggerOnFrameRateOverridesChanged() override;
@@ -993,9 +992,9 @@
*/
nsecs_t getVsyncPeriodFromHWC() const REQUIRES(mStateLock);
- void setHWCVsyncEnabled(PhysicalDisplayId id, bool enabled) {
- hal::Vsync halState = enabled ? hal::Vsync::ENABLE : hal::Vsync::DISABLE;
- getHwComposer().setVsyncEnabled(id, halState);
+ void setHWCVsyncEnabled(PhysicalDisplayId id, hal::Vsync enabled) {
+ mLastHWCVsyncState = enabled;
+ getHwComposer().setVsyncEnabled(id, enabled);
}
using FenceTimePtr = std::shared_ptr<FenceTime>;
@@ -1130,15 +1129,7 @@
pid_t mPid;
std::future<void> mRenderEnginePrimeCacheFuture;
- // mStateLock has conventions related to the current thread, because only
- // the main thread should modify variables protected by mStateLock.
- // - read access from a non-main thread must lock mStateLock, since the main
- // thread may modify these variables.
- // - write access from a non-main thread is not permitted.
- // - read access from the main thread can use an ftl::FakeGuard, since other
- // threads must not modify these variables.
- // - write access from the main thread must lock mStateLock, since another
- // thread may be reading these variables.
+ // access must be protected by mStateLock
mutable Mutex mStateLock;
State mCurrentState{LayerVector::StateSet::Current};
std::atomic<int32_t> mTransactionFlags = 0;
@@ -1330,6 +1321,9 @@
TimePoint mScheduledPresentTime GUARDED_BY(kMainThreadContext);
TimePoint mExpectedPresentTime GUARDED_BY(kMainThreadContext);
+ hal::Vsync mHWCVsyncPendingState = hal::Vsync::DISABLE;
+ hal::Vsync mLastHWCVsyncState = hal::Vsync::DISABLE;
+
// below flags are set by main thread only
bool mSetActiveModePending = false;
diff --git a/services/surfaceflinger/TransactionCallbackInvoker.cpp b/services/surfaceflinger/TransactionCallbackInvoker.cpp
index 3da98d4..3587a72 100644
--- a/services/surfaceflinger/TransactionCallbackInvoker.cpp
+++ b/services/surfaceflinger/TransactionCallbackInvoker.cpp
@@ -92,11 +92,6 @@
return NO_ERROR;
}
-status_t TransactionCallbackInvoker::registerUnpresentedCallbackHandle(
- const sp<CallbackHandle>& handle) {
- return addCallbackHandle(handle, std::vector<JankData>());
-}
-
status_t TransactionCallbackInvoker::findOrCreateTransactionStats(
const sp<IBinder>& listener, const std::vector<CallbackId>& callbackIds,
TransactionStats** outTransactionStats) {
diff --git a/services/surfaceflinger/TransactionCallbackInvoker.h b/services/surfaceflinger/TransactionCallbackInvoker.h
index 61ff9bc..3074795 100644
--- a/services/surfaceflinger/TransactionCallbackInvoker.h
+++ b/services/surfaceflinger/TransactionCallbackInvoker.h
@@ -66,9 +66,6 @@
status_t addOnCommitCallbackHandles(const std::deque<sp<CallbackHandle>>& handles,
std::deque<sp<CallbackHandle>>& outRemainingHandles);
- // Adds the Transaction CallbackHandle from a layer that does not need to be relatched and
- // presented this frame.
- status_t registerUnpresentedCallbackHandle(const sp<CallbackHandle>& handle);
void addEmptyTransaction(const ListenerCallbacks& listenerCallbacks);
void addPresentFence(sp<Fence>);
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
index 5303db3..cdffbb4 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
@@ -226,27 +226,27 @@
TestableScheduler(const std::shared_ptr<scheduler::RefreshRateSelector>& selectorPtr,
sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
: TestableScheduler(std::make_unique<android::mock::VsyncController>(),
- std::make_shared<android::mock::VSyncTracker>(), selectorPtr,
+ std::make_unique<android::mock::VSyncTracker>(), selectorPtr,
std::move(modulatorPtr), callback) {}
TestableScheduler(std::unique_ptr<VsyncController> controller,
- VsyncSchedule::TrackerPtr tracker,
+ std::unique_ptr<VSyncTracker> tracker,
std::shared_ptr<RefreshRateSelector> selectorPtr,
sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
: Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
+ mVsyncSchedule.emplace(VsyncSchedule(std::move(tracker), nullptr, std::move(controller)));
+
const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
registerDisplay(displayId, std::move(selectorPtr));
- mVsyncSchedules.emplace_or_replace(displayId,
- std::shared_ptr<VsyncSchedule>(
- new VsyncSchedule(displayId, std::move(tracker),
- nullptr,
- std::move(controller))));
}
ConnectionHandle createConnection(std::unique_ptr<EventThread> eventThread) {
return Scheduler::createConnection(std::move(eventThread));
}
+ auto &mutablePrimaryHWVsyncEnabled() { return mPrimaryHWVsyncEnabled; }
+ auto &mutableHWVsyncAvailable() { return mHWVsyncAvailable; }
+
auto &mutableLayerHistory() { return mLayerHistory; }
auto refreshRateSelector() { return leaderSelectorPtr(); }
@@ -649,10 +649,10 @@
// The ISchedulerCallback argument can be nullptr for a no-op implementation.
void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
- std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
+ std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
std::unique_ptr<EventThread> appEventThread,
std::unique_ptr<EventThread> sfEventThread,
- scheduler::ISchedulerCallback* callback = nullptr,
+ scheduler::ISchedulerCallback *callback = nullptr,
bool hasMultipleModes = false) {
constexpr DisplayModeId kModeId60{0};
DisplayModes modes = makeModes(mock::createDisplayMode(kModeId60, 60_Hz));
@@ -791,7 +791,7 @@
}
private:
- void setVsyncEnabled(PhysicalDisplayId, bool) override {}
+ void setVsyncEnabled(bool) override {}
void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
void kernelTimerChanged(bool) override {}
void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
index b7b42ab..44805db 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
@@ -47,23 +47,19 @@
PowerMode::DOZE_SUSPEND, PowerMode::ON_SUSPEND};
constexpr uint16_t kRandomStringLength = 256;
+constexpr std::chrono::duration kSyncPeriod(16ms);
+
template <typename T>
void dump(T* component, FuzzedDataProvider* fdp) {
std::string res = fdp->ConsumeRandomLengthString(kRandomStringLength);
component->dump(res);
}
-class SchedulerFuzzer : public IEventThreadCallback {
+class SchedulerFuzzer {
public:
SchedulerFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
void process();
- // IEventThreadCallback overrides.
- bool isVsyncTargetForUid(TimePoint /* expectedVsyncTime */, uid_t) const override {
- return true;
- }
- Fps getLeaderRenderFrameRate(uid_t) const override { return 60_Hz; }
-
private:
void fuzzRefreshRateSelection();
void fuzzRefreshRateSelector();
@@ -80,7 +76,7 @@
FuzzedDataProvider mFdp;
- std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+ std::optional<scheduler::VsyncSchedule> mVsyncSchedule;
};
PhysicalDisplayId SchedulerFuzzer::getPhysicalDisplayId() {
@@ -94,12 +90,12 @@
}
void SchedulerFuzzer::fuzzEventThread() {
- mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
- new scheduler::VsyncSchedule(getPhysicalDisplayId(),
- std::make_shared<mock::VSyncTracker>(),
- std::make_shared<mock::VSyncDispatch>(), nullptr));
+ mVsyncSchedule.emplace(scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
+ std::make_unique<mock::VSyncDispatch>(),
+ nullptr));
+ const auto getVsyncPeriod = [](uid_t /* uid */) { return kSyncPeriod.count(); };
std::unique_ptr<android::impl::EventThread> thread = std::make_unique<
- android::impl::EventThread>("fuzzer", mVsyncSchedule, *this, nullptr /* TokenManager */,
+ android::impl::EventThread>("fuzzer", *mVsyncSchedule, nullptr, nullptr, getVsyncPeriod,
(std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>(),
(std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>());
@@ -136,7 +132,7 @@
}
void SchedulerFuzzer::fuzzVSyncDispatchTimerQueue() {
- auto stubTracker = std::make_shared<FuzzImplVSyncTracker>(mFdp.ConsumeIntegral<nsecs_t>());
+ FuzzImplVSyncTracker stubTracker{mFdp.ConsumeIntegral<nsecs_t>()};
scheduler::VSyncDispatchTimerQueue
mDispatch{std::make_unique<scheduler::ControllableClock>(), stubTracker,
mFdp.ConsumeIntegral<nsecs_t>() /*dispatchGroupThreshold*/,
@@ -149,17 +145,17 @@
scheduler::VSyncDispatchTimerQueueEntry entry(
"fuzz", [](auto, auto, auto) {},
mFdp.ConsumeIntegral<nsecs_t>() /*vSyncMoveThreshold*/);
- entry.update(*stubTracker, 0);
+ entry.update(stubTracker, 0);
entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
.readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
.earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
- *stubTracker, 0);
+ stubTracker, 0);
entry.disarm();
entry.ensureNotRunning();
entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
.readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
.earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
- *stubTracker, 0);
+ stubTracker, 0);
auto const wakeup = entry.wakeupTime();
auto const ready = entry.readyTime();
entry.callback(entry.executing(), *wakeup, *ready);
@@ -173,8 +169,8 @@
uint16_t now = mFdp.ConsumeIntegral<uint16_t>();
uint16_t historySize = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
uint16_t minimumSamplesForPrediction = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
- scheduler::VSyncPredictor tracker{"predictor", mFdp.ConsumeIntegral<uint16_t>() /*period*/,
- historySize, minimumSamplesForPrediction,
+ scheduler::VSyncPredictor tracker{mFdp.ConsumeIntegral<uint16_t>() /*period*/, historySize,
+ minimumSamplesForPrediction,
mFdp.ConsumeIntegral<uint32_t>() /*outlierTolerancePercent*/};
uint16_t period = mFdp.ConsumeIntegral<uint16_t>();
tracker.setPeriod(period);
@@ -246,15 +242,13 @@
void SchedulerFuzzer::fuzzVSyncReactor() {
std::shared_ptr<FuzzImplVSyncTracker> vSyncTracker = std::make_shared<FuzzImplVSyncTracker>();
- scheduler::VSyncReactor reactor("fuzzer_reactor",
- std::make_unique<ClockWrapper>(
+ scheduler::VSyncReactor reactor(std::make_unique<ClockWrapper>(
std::make_shared<FuzzImplClock>()),
*vSyncTracker, mFdp.ConsumeIntegral<uint8_t>() /*pendingLimit*/,
false);
- reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>(), mFdp.ConsumeBool());
- bool periodFlushed = false; // Value does not matter, since this is an out
- // param from addHwVsyncTimestamp.
+ reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>());
+ bool periodFlushed = mFdp.ConsumeBool();
reactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed);
reactor.addHwVsyncTimestamp(mFdp.ConsumeIntegral<nsecs_t>() /*newPeriod*/, std::nullopt,
&periodFlushed);
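The fuzzer changes above keep driving the scheduler types through FuzzedDataProvider. A stripped-down sketch of that harness shape, with a hypothetical Widget in place of the real VSyncReactor/VSyncPredictor targets:

#include <cstddef>
#include <cstdint>
#include <fuzzer/FuzzedDataProvider.h>

struct Widget {
    void setPeriod(int64_t) {}
    void addSample(int64_t) {}
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);
    Widget widget;
    // Derive structured inputs from the raw fuzz bytes.
    widget.setPeriod(fdp.ConsumeIntegralInRange<int64_t>(1, 1'000'000'000));
    while (fdp.remaining_bytes() > 0) {
        widget.addSample(fdp.ConsumeIntegral<int64_t>());
    }
    return 0;
}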
diff --git a/services/surfaceflinger/tests/unittests/CompositionTest.cpp b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
index 419c818..0416e93 100644
--- a/services/surfaceflinger/tests/unittests/CompositionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
@@ -139,7 +139,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index 214b028..e0b508a 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -74,8 +74,8 @@
mock::EventThread::kCallingUid,
ResyncCallback())));
- mFlinger.setupScheduler(std::make_unique<mock::VsyncController>(),
- std::make_shared<mock::VSyncTracker>(),
+ mFlinger.setupScheduler(std::unique_ptr<scheduler::VsyncController>(mVsyncController),
+ std::unique_ptr<scheduler::VSyncTracker>(mVSyncTracker),
std::unique_ptr<EventThread>(mEventThread),
std::unique_ptr<EventThread>(mSFEventThread),
TestableSurfaceFlinger::SchedulerCallbackImpl::kMock);
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
index c9245d6..223f4db 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
@@ -128,6 +128,8 @@
renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
Hwc2::mock::Composer* mComposer = nullptr;
+ mock::VsyncController* mVsyncController = new mock::VsyncController;
+ mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker;
mock::EventThread* mEventThread = new mock::EventThread;
mock::EventThread* mSFEventThread = new mock::EventThread;
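These raw-pointer mocks are wrapped in unique_ptr when passed to setupScheduler, and the tests program them with EXPECT_CALL. A rough, self-contained sketch of that gmock pattern (the interface and mock here are stand-ins, not the real mock::VSyncTracker):

#include <gmock/gmock.h>
#include <gtest/gtest.h>

class TrackerInterface {
public:
    virtual ~TrackerInterface() = default;
    virtual int64_t nextAnticipatedVSyncTimeFrom(int64_t timePoint) = 0;
    virtual int64_t currentPeriod() = 0;
};

class MockTracker : public TrackerInterface {
public:
    MOCK_METHOD(int64_t, nextAnticipatedVSyncTimeFrom, (int64_t), (override));
    MOCK_METHOD(int64_t, currentPeriod, (), (override));
};

TEST(MockTrackerExample, ReturnsCannedValues) {
    MockTracker tracker;
    EXPECT_CALL(tracker, nextAnticipatedVSyncTimeFrom(testing::_))
            .WillRepeatedly(testing::Return(0));
    EXPECT_CALL(tracker, currentPeriod()).WillRepeatedly(testing::Return(16'666'667));
    EXPECT_EQ(0, tracker.nextAnticipatedVSyncTimeFrom(123));
}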
diff --git a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
index 5cecb8e..b3aba37 100644
--- a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
+++ b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
@@ -52,9 +52,11 @@
constexpr PhysicalDisplayId DISPLAY_ID_64BIT =
PhysicalDisplayId::fromEdid(0xffu, 0xffffu, 0xffff'ffffu);
+constexpr std::chrono::duration VSYNC_PERIOD(16ms);
+
} // namespace
-class EventThreadTest : public testing::Test, public IEventThreadCallback {
+class EventThreadTest : public testing::Test {
protected:
static constexpr std::chrono::nanoseconds kWorkDuration = 0ms;
static constexpr std::chrono::nanoseconds kReadyDuration = 3ms;
@@ -95,7 +97,7 @@
void expectConfigChangedEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
int32_t expectedConfigId,
nsecs_t expectedVsyncPeriod);
- void expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t);
+ void expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t);
void expectUidFrameRateMappingEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
std::vector<FrameRateOverride>);
@@ -104,14 +106,6 @@
mThread->onVsync(expectedPresentationTime, timestamp, deadlineTimestamp);
}
- // IEventThreadCallback overrides.
- bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override {
- mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTime, uid);
- return uid != mThrottledConnectionUid;
- }
-
- Fps getLeaderRenderFrameRate(uid_t uid) const override { return 60_Hz; }
-
AsyncCallRecorderWithCannedReturn<
scheduler::ScheduleResult (*)(scheduler::VSyncDispatch::CallbackToken,
scheduler::VSyncDispatch::ScheduleTiming)>
@@ -127,11 +121,11 @@
AsyncCallRecorder<void (*)(scheduler::VSyncDispatch::CallbackToken)>
mVSyncCallbackUnregisterRecorder;
AsyncCallRecorder<void (*)()> mResyncCallRecorder;
- mutable AsyncCallRecorder<void (*)(TimePoint, uid_t)> mThrottleVsyncCallRecorder;
+ AsyncCallRecorder<void (*)(nsecs_t, uid_t)> mThrottleVsyncCallRecorder;
ConnectionEventRecorder mConnectionEventCallRecorder{0};
ConnectionEventRecorder mThrottledConnectionEventCallRecorder{0};
- std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+ std::optional<scheduler::VsyncSchedule> mVsyncSchedule;
std::unique_ptr<impl::EventThread> mThread;
sp<MockEventThreadConnection> mConnection;
sp<MockEventThreadConnection> mThrottledConnection;
@@ -146,12 +140,12 @@
::testing::UnitTest::GetInstance()->current_test_info();
ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
- auto mockDispatchPtr = std::make_shared<mock::VSyncDispatch>();
- mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
- new scheduler::VsyncSchedule(INTERNAL_DISPLAY_ID,
- std::make_shared<mock::VSyncTracker>(), mockDispatchPtr,
- nullptr));
- mock::VSyncDispatch& mockDispatch = *mockDispatchPtr;
+ mVsyncSchedule.emplace(scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
+ std::make_unique<mock::VSyncDispatch>(),
+ nullptr));
+
+ mock::VSyncDispatch& mockDispatch =
+ *static_cast<mock::VSyncDispatch*>(&mVsyncSchedule->getDispatch());
EXPECT_CALL(mockDispatch, registerCallback(_, _))
.WillRepeatedly(Invoke(mVSyncCallbackRegisterRecorder.getInvocable()));
EXPECT_CALL(mockDispatch, schedule(_, _))
@@ -186,10 +180,19 @@
}
void EventThreadTest::createThread() {
+ const auto throttleVsync = [&](nsecs_t expectedVsyncTimestamp, uid_t uid) {
+ mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTimestamp, uid);
+ return (uid == mThrottledConnectionUid);
+ };
+ const auto getVsyncPeriod = [](uid_t uid) {
+ return VSYNC_PERIOD.count();
+ };
+
mTokenManager = std::make_unique<frametimeline::impl::TokenManager>();
mThread =
- std::make_unique<impl::EventThread>("EventThreadTest", mVsyncSchedule, *this,
- mTokenManager.get(), kWorkDuration, kReadyDuration);
+ std::make_unique<impl::EventThread>(/*std::move(source), */ "EventThreadTest",
+ *mVsyncSchedule, mTokenManager.get(), throttleVsync,
+ getVsyncPeriod, kWorkDuration, kReadyDuration);
// EventThread should register itself as VSyncSource callback.
EXPECT_TRUE(mVSyncCallbackRegisterRecorder.waitForCall().has_value());
@@ -222,7 +225,7 @@
EXPECT_EQ(expectedReadyDuration.count(), std::get<1>(args.value()).readyDuration);
}
-void EventThreadTest::expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t uid) {
+void EventThreadTest::expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t uid) {
auto args = mThrottleVsyncCallRecorder.waitForCall();
ASSERT_TRUE(args.has_value());
EXPECT_EQ(expectedTimestamp, std::get<0>(args.value()));
@@ -373,7 +376,7 @@
// Use the received callback to signal a first vsync event.
// The throttler should receive the event, as well as the connection.
onVSyncEvent(123, 456, 789);
- expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
+ expectThrottleVsyncReceived(456, mConnectionUid);
expectVsyncEventReceivedByConnection(123, 1u);
// EventThread is requesting one more callback due to VsyncRequest::SingleSuppressCallback
@@ -491,17 +494,17 @@
// Send a vsync event. EventThread should then make a call to the
// throttler, and the connection.
onVSyncEvent(123, 456, 789);
- expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
+ expectThrottleVsyncReceived(456, mConnectionUid);
expectVsyncEventReceivedByConnection(123, 1u);
// A second event should go to the same places.
onVSyncEvent(456, 123, 0);
- expectThrottleVsyncReceived(TimePoint::fromNs(123), mConnectionUid);
+ expectThrottleVsyncReceived(123, mConnectionUid);
expectVsyncEventReceivedByConnection(456, 2u);
// A third event should go to the same places.
onVSyncEvent(789, 777, 111);
- expectThrottleVsyncReceived(TimePoint::fromNs(777), mConnectionUid);
+ expectThrottleVsyncReceived(777, mConnectionUid);
expectVsyncEventReceivedByConnection(789, 3u);
}
@@ -740,7 +743,7 @@
// Use the received callback to signal a first vsync event.
// The throttler should receive the event, but not the connection.
onVSyncEvent(123, 456, 789);
- expectThrottleVsyncReceived(TimePoint::fromNs(456), mThrottledConnectionUid);
+ expectThrottleVsyncReceived(456, mThrottledConnectionUid);
mThrottledConnectionEventCallRecorder.waitForUnexpectedCall();
expectVSyncCallbackScheduleReceived(true);
@@ -748,7 +751,7 @@
// The throttler should receive the event, but the connection should
// not as it was only interested in the first.
onVSyncEvent(456, 123, 0);
- expectThrottleVsyncReceived(TimePoint::fromNs(123), mThrottledConnectionUid);
+ expectThrottleVsyncReceived(123, mThrottledConnectionUid);
EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
expectVSyncCallbackScheduleReceived(true);
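Editor's note on the EventThreadTest hunk above: the fixture stops implementing a callback interface and instead injects two plain std::function callbacks (throttleVsync and getVsyncPeriod) into the EventThread constructor. Below is a minimal, self-contained sketch of that injection style; all type names are hypothetical stand-ins, and only the lambda signatures mirror the test code above.

    #include <sys/types.h>  // uid_t
    #include <cstdint>
    #include <functional>
    #include <utility>

    // Hypothetical aliases mirroring the lambda signatures used in createThread().
    using ThrottleVsyncCallback = std::function<bool(int64_t /*vsyncTimeNs*/, uid_t)>;
    using GetVsyncPeriodFunction = std::function<int64_t(uid_t)>;

    // Hypothetical stand-in for the object under test; not impl::EventThread itself.
    class CallbackDrivenThread {
    public:
        CallbackDrivenThread(ThrottleVsyncCallback throttle, GetVsyncPeriodFunction getPeriod)
              : mThrottle(std::move(throttle)), mGetPeriod(std::move(getPeriod)) {}

        // Returns true when the event for this uid should be withheld.
        bool shouldThrottle(int64_t vsyncTimeNs, uid_t uid) const { return mThrottle(vsyncTimeNs, uid); }
        int64_t periodFor(uid_t uid) const { return mGetPeriod(uid); }

    private:
        ThrottleVsyncCallback mThrottle;
        GetVsyncPeriodFunction mGetPeriod;
    };

Wiring it up then looks like the lambdas in the test: the fixture captures its recorders by reference and hands the functions to the constructor, with no interface inheritance involved.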
diff --git a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
index 248061c..1cd9e49 100644
--- a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
@@ -137,7 +137,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
index ff7c2c9..ac63a0e 100644
--- a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
@@ -125,7 +125,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/GameModeTest.cpp b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
index ddf871b..29aa717 100644
--- a/services/surfaceflinger/tests/unittests/GameModeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
@@ -76,7 +76,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
index 23506b1..ee42e19 100644
--- a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
+++ b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
@@ -64,7 +64,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
index 8f1b450..7aa5201 100644
--- a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
@@ -67,12 +67,12 @@
struct MessageQueueTest : testing::Test {
void SetUp() override {
- EXPECT_CALL(*mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
+ EXPECT_CALL(mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
EXPECT_NO_FATAL_FAILURE(mEventQueue.initVsync(mVSyncDispatch, mTokenManager, kDuration));
- EXPECT_CALL(*mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
+ EXPECT_CALL(mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
}
- std::shared_ptr<mock::VSyncDispatch> mVSyncDispatch = std::make_shared<mock::VSyncDispatch>();
+ mock::VSyncDispatch mVSyncDispatch;
MockTokenManager mTokenManager;
TestableMessageQueue mEventQueue;
@@ -90,7 +90,7 @@
.earliestVsync = 0};
EXPECT_FALSE(mEventQueue.getScheduledFrameTime());
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -103,13 +103,13 @@
.readyDuration = 0,
.earliestVsync = 0};
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -122,7 +122,7 @@
.readyDuration = 0,
.earliestVsync = 0};
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -149,7 +149,7 @@
.readyDuration = 0,
.earliestVsync = kPresentTime.ns()};
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
}
@@ -161,7 +161,7 @@
.readyDuration = 0,
.earliestVsync = 0};
- EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
+ EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
}
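Editor's note on the MessageQueueTest hunk above: the mock VSyncDispatch becomes a plain data member instead of a shared_ptr, so expectations are set on the object directly (EXPECT_CALL(mVSyncDispatch, ...)) and the code under test receives a reference. A small gMock sketch of that ownership shape follows; Dispatch and Queue are hypothetical types used only to illustrate the pattern.

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    // Hypothetical interface and consumer; only the ownership pattern mirrors the hunk.
    struct Dispatch {
        virtual ~Dispatch() = default;
        virtual int schedule(int token) = 0;
    };

    struct MockDispatch : Dispatch {
        MOCK_METHOD(int, schedule, (int), (override));
    };

    struct Queue {
        explicit Queue(Dispatch& dispatch) : mDispatch(dispatch) {}
        int post(int token) { return mDispatch.schedule(token); }
        Dispatch& mDispatch;  // non-owning; the test fixture owns the mock by value
    };

    TEST(QueueTest, forwardsToDispatch) {
        MockDispatch dispatch;  // held by value, no shared_ptr indirection
        EXPECT_CALL(dispatch, schedule(7)).WillOnce(testing::Return(42));
        Queue queue(dispatch);
        EXPECT_EQ(42, queue.post(7));
    }

The by-value member keeps mock lifetime tied to the fixture, which is why the hunk can drop the dereference in every EXPECT_CALL.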
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index f0dd06d..4b15385 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include <ftl/fake_guard.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <log/log.h>
@@ -177,7 +176,7 @@
ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
- FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
+ mScheduler->setDisplayPowerMode(kPowerModeOn);
constexpr uint32_t kDisplayArea = 999'999;
mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
@@ -249,7 +248,7 @@
mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
- FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
+ mScheduler->setDisplayPowerMode(kPowerModeOn);
constexpr uint32_t kDisplayArea = 999'999;
mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
index 31f948f..ad3bd35 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
@@ -100,7 +100,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
index 98644aa..f553a23 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
@@ -44,10 +44,7 @@
// We expect a scheduled commit for the display transaction.
EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
- EXPECT_CALL(static_cast<mock::VSyncTracker&>(
- mFlinger.scheduler()->getVsyncSchedule()->getTracker()),
- nextAnticipatedVSyncTimeFrom(_))
- .WillRepeatedly(Return(0));
+ EXPECT_CALL(*mVSyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
// --------------------------------------------------------------------
// Invocation
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
index 2a0f2ef..622717f 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
@@ -116,7 +116,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
index 80ad22c..ab732ed 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
@@ -61,7 +61,7 @@
struct EventThreadBaseSupportedVariant {
static void setupVsyncAndEventThreadNoCallExpectations(DisplayTransactionTest* test) {
// The callback should not be notified to toggle VSYNC.
- EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, _)).Times(0);
+ EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_)).Times(0);
// The event thread should not be notified.
EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
@@ -71,28 +71,24 @@
struct EventThreadNotSupportedVariant : public EventThreadBaseSupportedVariant {
static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
- // The callback should be notified to enable VSYNC.
- EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
+ // These calls are only expected for the primary display.
- // The event thread should not be notified.
- EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
- EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
+ // Instead expect no calls.
+ setupVsyncAndEventThreadNoCallExpectations(test);
}
static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
- // The callback should be notified to disable VSYNC.
- EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
+ // These calls are only expected for the primary display.
- // The event thread should not be notified.
- EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
- EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
+ // Instead expect no calls.
+ setupVsyncAndEventThreadNoCallExpectations(test);
}
};
struct EventThreadIsSupportedVariant : public EventThreadBaseSupportedVariant {
static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
// The callback should be notified to enable VSYNC.
- EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
+ EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(true)).Times(1);
// The event thread should be notified that the screen was acquired.
EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(1);
@@ -100,7 +96,7 @@
static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
// The callback should be notified to disable VSYNC.
- EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
+ EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(false)).Times(1);
// The event thread should be notified that the screen was released.
EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(1);
@@ -109,12 +105,8 @@
struct DispSyncIsSupportedVariant {
static void setupResetModelCallExpectations(DisplayTransactionTest* test) {
- auto vsyncSchedule = test->mFlinger.scheduler()->getVsyncSchedule();
- EXPECT_CALL(static_cast<mock::VsyncController&>(vsyncSchedule->getController()),
- startPeriodTransition(DEFAULT_VSYNC_PERIOD, false))
- .Times(1);
- EXPECT_CALL(static_cast<mock::VSyncTracker&>(vsyncSchedule->getTracker()), resetModel())
- .Times(1);
+ EXPECT_CALL(*test->mVsyncController, startPeriodTransition(DEFAULT_VSYNC_PERIOD)).Times(1);
+ EXPECT_CALL(*test->mVSyncTracker, resetModel()).Times(1);
}
};
@@ -270,9 +262,8 @@
return display;
}
- static void setInitialHwVsyncEnabled(DisplayTransactionTest* test, PhysicalDisplayId id,
- bool enabled) {
- test->mFlinger.scheduler()->setInitialHwVsyncEnabled(id, enabled);
+ static void setInitialPrimaryHWVsyncEnabled(DisplayTransactionTest* test, bool enabled) {
+ test->mFlinger.scheduler()->mutablePrimaryHWVsyncEnabled() = enabled;
}
static void setupRepaintEverythingCallExpectations(DisplayTransactionTest* test) {
@@ -309,11 +300,6 @@
// A sample configuration for the external display.
// In addition to not having event thread support, we emulate not having doze
// support.
-// FIXME (b/267483230): ExternalDisplay supports the features tracked in
-// DispSyncIsSupportedVariant, but is the follower, so the
-// expectations set by DispSyncIsSupportedVariant don't match (wrong schedule).
-// We need a way to retrieve the proper DisplayId from
-// setupResetModelCallExpectations (or pass it in).
template <typename TransitionVariant>
using ExternalDisplayPowerCase =
DisplayPowerCase<ExternalDisplayVariant, DozeNotSupportedVariant<ExternalDisplayVariant>,
@@ -343,12 +329,9 @@
Case::Doze::setupComposerCallExpectations(this);
auto display =
Case::injectDisplayWithInitialPowerMode(this, Case::Transition::INITIAL_POWER_MODE);
- auto displayId = display->getId();
- if (auto physicalDisplayId = PhysicalDisplayId::tryCast(displayId)) {
- Case::setInitialHwVsyncEnabled(this, *physicalDisplayId,
- PowerModeInitialVSyncEnabled<
- Case::Transition::INITIAL_POWER_MODE>::value);
- }
+ Case::setInitialPrimaryHWVsyncEnabled(this,
+ PowerModeInitialVSyncEnabled<
+ Case::Transition::INITIAL_POWER_MODE>::value);
// --------------------------------------------------------------------
// Call Expectations
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
index 7e14588..fed6a1a 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
@@ -34,7 +34,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index c360f93..6cf6141 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -37,16 +37,19 @@
public:
TestableScheduler(RefreshRateSelectorPtr selectorPtr, ISchedulerCallback& callback)
: TestableScheduler(std::make_unique<mock::VsyncController>(),
- std::make_shared<mock::VSyncTracker>(), std::move(selectorPtr),
+ std::make_unique<mock::VSyncTracker>(), std::move(selectorPtr),
/* modulatorPtr */ nullptr, callback) {}
TestableScheduler(std::unique_ptr<VsyncController> controller,
- std::shared_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
+ std::unique_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
: Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
+ mVsyncSchedule.emplace(VsyncSchedule(std::move(tracker),
+ std::make_unique<mock::VSyncDispatch>(),
+ std::move(controller)));
+
const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
- registerDisplay(displayId, std::move(selectorPtr), std::move(controller),
- std::move(tracker));
+ registerDisplay(displayId, std::move(selectorPtr));
ON_CALL(*this, postMessage).WillByDefault([](sp<MessageHandler>&& handler) {
// Execute task to prevent broken promise exception on destruction.
@@ -63,6 +66,13 @@
return Scheduler::createConnection(std::move(eventThread));
}
+ /* ------------------------------------------------------------------------
+ * Read-write access to private data to set up preconditions and assert
+ * post-conditions.
+ */
+ auto& mutablePrimaryHWVsyncEnabled() { return mPrimaryHWVsyncEnabled; }
+ auto& mutableHWVsyncAvailable() { return mHWVsyncAvailable; }
+
auto refreshRateSelector() { return leaderSelectorPtr(); }
const auto& refreshRateSelectors() const NO_THREAD_SAFETY_ANALYSIS {
@@ -70,21 +80,8 @@
}
void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
- registerDisplay(displayId, std::move(selectorPtr),
- std::make_unique<mock::VsyncController>(),
- std::make_shared<mock::VSyncTracker>());
- }
-
- void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr,
- std::unique_ptr<VsyncController> controller,
- std::shared_ptr<VSyncTracker> tracker) {
ftl::FakeGuard guard(kMainThreadContext);
- Scheduler::registerDisplayInternal(displayId, std::move(selectorPtr),
- std::shared_ptr<VsyncSchedule>(
- new VsyncSchedule(displayId, std::move(tracker),
- std::make_shared<
- mock::VSyncDispatch>(),
- std::move(controller))));
+ Scheduler::registerDisplay(displayId, std::move(selectorPtr));
}
void unregisterDisplay(PhysicalDisplayId displayId) {
@@ -160,13 +157,6 @@
Scheduler::onNonPrimaryDisplayModeChanged(handle, mode);
}
- void setInitialHwVsyncEnabled(PhysicalDisplayId id, bool enabled) {
- auto schedule = getVsyncSchedule(id);
- std::lock_guard<std::mutex> lock(schedule->mHwVsyncLock);
- schedule->mHwVsyncState = enabled ? VsyncSchedule::HwVsyncState::Enabled
- : VsyncSchedule::HwVsyncState::Disabled;
- }
-
private:
// ICompositor overrides:
void configure() override {}
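Editor's note on the TestableScheduler.h hunk above: the test double regains mutablePrimaryHWVsyncEnabled()/mutableHWVsyncAvailable() accessors so tests can preset private state directly instead of reaching through a per-display VsyncSchedule. A minimal sketch of that mutable-accessor pattern, with hypothetical names:

    // Hypothetical production class with state a test wants to preset and inspect.
    class Controller {
    protected:
        bool mHwVsyncEnabled = false;
    };

    // Test double exposing read-write access for preconditions and post-conditions.
    class TestableController : public Controller {
    public:
        auto& mutableHwVsyncEnabled() { return mHwVsyncEnabled; }
    };

    // Typical use inside a test body:
    //   TestableController controller;
    //   controller.mutableHwVsyncEnabled() = true;        // precondition
    //   EXPECT_TRUE(controller.mutableHwVsyncEnabled());  // post-condition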
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index 63b79a4..68c738f 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -201,7 +201,7 @@
std::variant<OneDisplayMode, TwoDisplayModes, RefreshRateSelectorPtr>;
void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
- std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
+ std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
std::unique_ptr<EventThread> appEventThread,
std::unique_ptr<EventThread> sfEventThread,
SchedulerCallbackImpl callbackImpl = SchedulerCallbackImpl::kNoOp,
@@ -253,7 +253,7 @@
std::move(modulatorPtr), callback);
}
- mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(), *mTokenManager, 0ms);
+ mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(), *mTokenManager, 0ms);
mFlinger->mAppConnectionHandle = mScheduler->createConnection(std::move(appEventThread));
mFlinger->mSfConnectionHandle = mScheduler->createConnection(std::move(sfEventThread));
diff --git a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
index a9a617b..859f702 100644
--- a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
@@ -83,14 +83,15 @@
mFlinger.setupComposer(std::make_unique<Hwc2::mock::Composer>());
mFlinger.setupScheduler(std::unique_ptr<mock::VsyncController>(mVsyncController),
- mVSyncTracker, std::move(eventThread), std::move(sfEventThread));
+ std::unique_ptr<mock::VSyncTracker>(mVSyncTracker),
+ std::move(eventThread), std::move(sfEventThread));
mFlinger.flinger()->addTransactionReadyFilters();
}
TestableSurfaceFlinger mFlinger;
mock::VsyncController* mVsyncController = new mock::VsyncController();
- std::shared_ptr<mock::VSyncTracker> mVSyncTracker = std::make_shared<mock::VSyncTracker>();
+ mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker();
struct TransactionInfo {
Vector<ComposerState> states;
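Editor's note on the TransactionApplicationTest hunk above: the fixture keeps raw mock pointers (mVsyncController, mVSyncTracker) created with new and later wraps them into unique_ptr when handing them to setupScheduler, keeping a non-owning alias for setting expectations. A compact sketch of that handoff, with hypothetical types; it also mirrors the test-only tradeoff that the raw pointer leaks if ownership is never transferred.

    #include <memory>
    #include <utility>

    // Hypothetical collaborator and consumer; only the ownership handoff mirrors the hunk.
    struct Tracker {
        virtual ~Tracker() = default;
    };
    struct MockTracker : Tracker {};

    struct Consumer {
        void adopt(std::unique_ptr<Tracker> tracker) { mTracker = std::move(tracker); }
        std::unique_ptr<Tracker> mTracker;
    };

    struct Fixture {
        // Raw pointer kept as a non-owning alias for EXPECT_CALLs later.
        MockTracker* mTracker = new MockTracker();
        Consumer mConsumer;

        void setUp() {
            // Ownership transfers to the consumer; mTracker stays valid while it lives.
            mConsumer.adopt(std::unique_ptr<Tracker>(mTracker));
        }
    };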
diff --git a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
index b228bcb..1173d1c 100644
--- a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
@@ -85,7 +85,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
index bfebecd..ae03db4 100644
--- a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
@@ -84,7 +84,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
index aa33716..da87f1d 100644
--- a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
@@ -117,7 +117,7 @@
ResyncCallback())));
auto vsyncController = std::make_unique<mock::VsyncController>();
- auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+ auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
index fcd2f56..47c2dee 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -109,8 +109,7 @@
class RepeatingCallbackReceiver {
public:
- RepeatingCallbackReceiver(std::shared_ptr<VSyncDispatch> dispatch, nsecs_t workload,
- nsecs_t readyDuration)
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t workload, nsecs_t readyDuration)
: mWorkload(workload),
mReadyDuration(readyDuration),
mCallback(
@@ -167,10 +166,9 @@
};
TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
- auto tracker = std::make_shared<FixedRateIdealStubTracker>();
- auto dispatch =
- std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
- mDispatchGroupThreshold, mVsyncMoveThreshold);
+ FixedRateIdealStubTracker tracker;
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+ mVsyncMoveThreshold);
static size_t constexpr num_clients = 3;
std::array<RepeatingCallbackReceiver, num_clients>
@@ -197,15 +195,14 @@
// starts at 333hz, slides down to 43hz
TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
auto next_vsync_interval = toNs(3ms);
- auto tracker = std::make_shared<VRRStubTracker>(next_vsync_interval);
- auto dispatch =
- std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
- mDispatchGroupThreshold, mVsyncMoveThreshold);
+ VRRStubTracker tracker(next_vsync_interval);
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+ mVsyncMoveThreshold);
RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
auto const on_each_frame = [&](nsecs_t last_known) {
- tracker->set_interval(next_vsync_interval += toNs(1ms), last_known);
+ tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
};
std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
@@ -216,10 +213,9 @@
// starts at 333hz, jumps to 200hz at frame 10
TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
- auto tracker = std::make_shared<VRRStubTracker>(toNs(3ms));
- auto dispatch =
- std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
- mDispatchGroupThreshold, mVsyncMoveThreshold);
+ VRRStubTracker tracker(toNs(3ms));
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+ mVsyncMoveThreshold);
RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
@@ -227,7 +223,7 @@
auto constexpr jump_frame_at = 10u;
auto const on_each_frame = [&](nsecs_t last_known) {
if (jump_frame_counter++ == jump_frame_at) {
- tracker->set_interval(toNs(5ms), last_known);
+ tracker.set_interval(toNs(5ms), last_known);
}
};
std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
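Editor's note on the VSyncDispatchRealtimeTest hunk above: trackers and dispatchers become stack objects passed by reference, so the test controls lifetime ordering directly; the tracker is declared before, and therefore outlives, the dispatch that references it. A minimal sketch of that lifetime relationship, with hypothetical types:

    // Hypothetical types; only the declaration-order/lifetime point mirrors the hunk.
    struct StubTracker {
        long nextVsyncFrom(long now) const { return now + 1000; }
    };

    struct TimerQueue {
        explicit TimerQueue(StubTracker& tracker) : mTracker(tracker) {}
        long nextWakeup(long now) const { return mTracker.nextVsyncFrom(now) - 100; }
        StubTracker& mTracker;  // non-owning; caller guarantees the tracker outlives the queue
    };

    int main() {
        StubTracker tracker;           // declared first, destroyed last
        TimerQueue dispatch(tracker);  // safe: the reference target outlives the referrer
        return dispatch.nextWakeup(0) == 900 ? 0 : 1;
    }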
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
index 82daffd..14a2860 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
@@ -116,14 +116,13 @@
class CountingCallback {
public:
- CountingCallback(std::shared_ptr<VSyncDispatch> dispatch)
- : mDispatch(std::move(dispatch)),
- mToken(mDispatch->registerCallback(std::bind(&CountingCallback::counter, this,
- std::placeholders::_1,
- std::placeholders::_2,
- std::placeholders::_3),
- "test")) {}
- ~CountingCallback() { mDispatch->unregisterCallback(mToken); }
+ CountingCallback(VSyncDispatch& dispatch)
+ : mDispatch(dispatch),
+ mToken(dispatch.registerCallback(std::bind(&CountingCallback::counter, this,
+ std::placeholders::_1, std::placeholders::_2,
+ std::placeholders::_3),
+ "test")) {}
+ ~CountingCallback() { mDispatch.unregisterCallback(mToken); }
operator VSyncDispatch::CallbackToken() const { return mToken; }
@@ -133,7 +132,7 @@
mReadyTime.push_back(readyTime);
}
- std::shared_ptr<VSyncDispatch> mDispatch;
+ VSyncDispatch& mDispatch;
VSyncDispatch::CallbackToken mToken;
std::vector<nsecs_t> mCalls;
std::vector<nsecs_t> mWakeupTime;
@@ -142,12 +141,12 @@
class PausingCallback {
public:
- PausingCallback(std::shared_ptr<VSyncDispatch> dispatch, std::chrono::milliseconds pauseAmount)
- : mDispatch(std::move(dispatch)),
- mToken(mDispatch->registerCallback(std::bind(&PausingCallback::pause, this,
- std::placeholders::_1,
- std::placeholders::_2),
- "test")),
+ PausingCallback(VSyncDispatch& dispatch, std::chrono::milliseconds pauseAmount)
+ : mDispatch(dispatch),
+ mToken(dispatch.registerCallback(std::bind(&PausingCallback::pause, this,
+ std::placeholders::_1,
+ std::placeholders::_2),
+ "test")),
mRegistered(true),
mPauseAmount(pauseAmount) {}
~PausingCallback() { unregister(); }
@@ -182,12 +181,12 @@
void unregister() {
if (mRegistered) {
- mDispatch->unregisterCallback(mToken);
+ mDispatch.unregisterCallback(mToken);
mRegistered = false;
}
}
- std::shared_ptr<VSyncDispatch> mDispatch;
+ VSyncDispatch& mDispatch;
VSyncDispatch::CallbackToken mToken;
bool mRegistered = true;
@@ -232,26 +231,22 @@
static nsecs_t constexpr mDispatchGroupThreshold = 5;
nsecs_t const mPeriod = 1000;
nsecs_t const mVsyncMoveThreshold = 300;
- std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
- std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
- std::shared_ptr<VSyncDispatch> mDispatch =
- std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
- mDispatchGroupThreshold, mVsyncMoveThreshold);
+ NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
+ VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
+ mVsyncMoveThreshold};
};
TEST_F(VSyncDispatchTimerQueueTest, unregistersSetAlarmOnDestruction) {
EXPECT_CALL(mMockClock, alarmAt(_, 900));
EXPECT_CALL(mMockClock, alarmCancel());
{
- std::shared_ptr<VSyncDispatch> mDispatch =
- std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
- mDispatchGroupThreshold,
- mVsyncMoveThreshold);
+ VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
+ mVsyncMoveThreshold};
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = 1000});
+ const auto result = mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
}
@@ -262,10 +257,10 @@
EXPECT_CALL(mMockClock, alarmAt(_, 900));
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = intended});
+ const auto result = mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = intended});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
@@ -282,15 +277,14 @@
EXPECT_CALL(mMockClock, alarmAt(_, 700)).InSequence(seq);
CountingCallback cb(mDispatch);
- auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = intended});
+ auto result = mDispatch.schedule(cb,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = intended});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
- result =
- mDispatch->update(cb,
+ result = mDispatch.update(cb,
{.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(700, *result);
@@ -308,17 +302,17 @@
CountingCallback cb(mDispatch);
const auto result =
- mDispatch->update(cb,
- {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
+ mDispatch.update(cb,
+ {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
EXPECT_FALSE(result.has_value());
}
TEST_F(VSyncDispatchTimerQueueTest, basicAlarmSettingFutureWithAdjustmentToTrueVsync) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
EXPECT_CALL(mMockClock, alarmAt(_, 1050));
CountingCallback cb(mDispatch);
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(1));
@@ -329,15 +323,15 @@
auto const now = 234;
mMockClock.advanceBy(234);
auto const workDuration = 10 * mPeriod;
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + workDuration))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + workDuration))
.WillOnce(Return(mPeriod * 11));
EXPECT_CALL(mMockClock, alarmAt(_, mPeriod));
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = workDuration,
- .readyDuration = 0,
- .earliestVsync = mPeriod});
+ const auto result = mDispatch.schedule(cb,
+ {.workDuration = workDuration,
+ .readyDuration = 0,
+ .earliestVsync = mPeriod});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod, *result);
}
@@ -347,13 +341,12 @@
EXPECT_CALL(mMockClock, alarmCancel());
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = mPeriod});
+ const auto result =
+ mDispatch.schedule(cb,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod - 100, *result);
- EXPECT_EQ(mDispatch->cancel(cb), CancelResult::Cancelled);
+ EXPECT_EQ(mDispatch.cancel(cb), CancelResult::Cancelled);
}
TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLate) {
@@ -361,14 +354,13 @@
EXPECT_CALL(mMockClock, alarmCancel());
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = mPeriod});
+ const auto result =
+ mDispatch.schedule(cb,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod - 100, *result);
mMockClock.advanceBy(950);
- EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
+ EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
}
TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLateWhenRunning) {
@@ -376,16 +368,15 @@
EXPECT_CALL(mMockClock, alarmCancel());
PausingCallback cb(mDispatch, std::chrono::duration_cast<std::chrono::milliseconds>(1s));
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = mPeriod});
+ const auto result =
+ mDispatch.schedule(cb,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod - 100, *result);
std::thread pausingThread([&] { mMockClock.advanceToNextCallback(); });
EXPECT_TRUE(cb.waitForPause());
- EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
+ EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
cb.unpause();
pausingThread.join();
}
@@ -398,10 +389,9 @@
PausingCallback cb(mDispatch, 50ms);
cb.stashResource(resource);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = mPeriod});
+ const auto result =
+ mDispatch.schedule(cb,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod - 100, *result);
@@ -418,7 +408,7 @@
}
TEST_F(VSyncDispatchTimerQueueTest, basicTwoAlarmSetting) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
.Times(4)
.WillOnce(Return(1055))
.WillOnce(Return(1063))
@@ -433,8 +423,8 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
- mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+ mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
advanceToNextCallback();
advanceToNextCallback();
@@ -446,7 +436,7 @@
}
TEST_F(VSyncDispatchTimerQueueTest, noCloseCallbacksAfterPeriodChange) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
.Times(4)
.WillOnce(Return(1000))
.WillOnce(Return(2000))
@@ -460,21 +450,21 @@
CountingCallback cb(mDispatch);
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(1));
EXPECT_THAT(cb.mCalls[0], Eq(1000));
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
ASSERT_THAT(cb.mCalls.size(), Eq(2));
EXPECT_THAT(cb.mCalls[1], Eq(2000));
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
advanceToNextCallback();
@@ -483,7 +473,7 @@
}
TEST_F(VSyncDispatchTimerQueueTest, rearmsFaroutTimeoutWhenCancellingCloseOne) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
.Times(4)
.WillOnce(Return(10000))
.WillOnce(Return(1000))
@@ -498,10 +488,10 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
- mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
- mDispatch->cancel(cb1);
+ mDispatch.schedule(cb0,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
+ mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+ mDispatch.cancel(cb1);
}
TEST_F(VSyncDispatchTimerQueueTest, noUnnecessaryRearmsWhenRescheduling) {
@@ -512,9 +502,9 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
@@ -527,9 +517,9 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
@@ -547,10 +537,10 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1,
- {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
ASSERT_THAT(cb0.mCalls.size(), Eq(1));
@@ -558,11 +548,9 @@
ASSERT_THAT(cb1.mCalls.size(), Eq(1));
EXPECT_THAT(cb1.mCalls[0], Eq(mPeriod));
- mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
- mDispatch->schedule(cb1,
- {.workDuration = notCloseOffset,
- .readyDuration = 0,
- .earliestVsync = 2000});
+ mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb1,
+ {.workDuration = notCloseOffset, .readyDuration = 0, .earliestVsync = 2000});
advanceToNextCallback();
ASSERT_THAT(cb1.mCalls.size(), Eq(2));
EXPECT_THAT(cb1.mCalls[1], Eq(2000));
@@ -582,32 +570,32 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
- EXPECT_EQ(mDispatch->cancel(cb0), CancelResult::Cancelled);
+ EXPECT_EQ(mDispatch.cancel(cb0), CancelResult::Cancelled);
}
TEST_F(VSyncDispatchTimerQueueTest, setAlarmCallsAtCorrectTimeWithChangingVsync) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
.Times(3)
.WillOnce(Return(950))
.WillOnce(Return(1975))
.WillOnce(Return(2950));
CountingCallback cb(mDispatch);
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
mMockClock.advanceBy(850);
EXPECT_THAT(cb.mCalls.size(), Eq(1));
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
mMockClock.advanceBy(900);
EXPECT_THAT(cb.mCalls.size(), Eq(1));
mMockClock.advanceBy(125);
EXPECT_THAT(cb.mCalls.size(), Eq(2));
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
mMockClock.advanceBy(975);
EXPECT_THAT(cb.mCalls.size(), Eq(3));
}
@@ -618,48 +606,48 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
VSyncDispatch::CallbackToken tmp;
- tmp = mDispatch->registerCallback(
+ tmp = mDispatch.registerCallback(
[&](auto, auto, auto) {
- mDispatch->schedule(tmp,
- {.workDuration = 100,
- .readyDuration = 0,
- .earliestVsync = 2000});
+ mDispatch.schedule(tmp,
+ {.workDuration = 100,
+ .readyDuration = 0,
+ .earliestVsync = 2000});
},
"o.o");
- mDispatch->schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
}
TEST_F(VSyncDispatchTimerQueueTest, callbackReentrantWithPastWakeup) {
VSyncDispatch::CallbackToken tmp;
std::optional<nsecs_t> lastTarget;
- tmp = mDispatch->registerCallback(
+ tmp = mDispatch.registerCallback(
[&](auto timestamp, auto, auto) {
auto result =
- mDispatch->schedule(tmp,
+ mDispatch.schedule(tmp,
+ {.workDuration = 400,
+ .readyDuration = 0,
+ .earliestVsync = timestamp - mVsyncMoveThreshold});
+ EXPECT_TRUE(result.has_value());
+ EXPECT_EQ(mPeriod + timestamp - 400, *result);
+ result = mDispatch.schedule(tmp,
{.workDuration = 400,
.readyDuration = 0,
- .earliestVsync = timestamp - mVsyncMoveThreshold});
+ .earliestVsync = timestamp});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod + timestamp - 400, *result);
- result = mDispatch->schedule(tmp,
- {.workDuration = 400,
- .readyDuration = 0,
- .earliestVsync = timestamp});
- EXPECT_TRUE(result.has_value());
- EXPECT_EQ(mPeriod + timestamp - 400, *result);
- result = mDispatch->schedule(tmp,
- {.workDuration = 400,
- .readyDuration = 0,
- .earliestVsync = timestamp + mVsyncMoveThreshold});
+ result = mDispatch.schedule(tmp,
+ {.workDuration = 400,
+ .readyDuration = 0,
+ .earliestVsync = timestamp + mVsyncMoveThreshold});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(mPeriod + timestamp - 400, *result);
lastTarget = timestamp;
},
"oo");
- mDispatch->schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
EXPECT_THAT(lastTarget, Eq(1000));
@@ -675,16 +663,16 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
CountingCallback cb(mDispatch);
- mDispatch->schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
mMockClock.advanceBy(750);
- mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
- mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
mMockClock.advanceBy(800);
- mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
}
TEST_F(VSyncDispatchTimerQueueTest, lateModifications) {
@@ -697,12 +685,12 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
- mDispatch->schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
- mDispatch->schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
+ mDispatch.schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
advanceToNextCallback();
@@ -714,8 +702,8 @@
CountingCallback cb0(mDispatch);
CountingCallback cb1(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
}
TEST_F(VSyncDispatchTimerQueueTest, setsTimerAfterCancellation) {
@@ -725,30 +713,29 @@
EXPECT_CALL(mMockClock, alarmAt(_, 900)).InSequence(seq);
CountingCallback cb0(mDispatch);
- mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->cancel(cb0);
- mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.cancel(cb0);
+ mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
}
TEST_F(VSyncDispatchTimerQueueTest, makingUpIdsError) {
VSyncDispatch::CallbackToken token(100);
- EXPECT_FALSE(
- mDispatch
- ->schedule(token,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
- .has_value());
- EXPECT_THAT(mDispatch->cancel(token), Eq(CancelResult::Error));
+ EXPECT_FALSE(mDispatch
+ .schedule(token,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
+ .has_value());
+ EXPECT_THAT(mDispatch.cancel(token), Eq(CancelResult::Error));
}
TEST_F(VSyncDispatchTimerQueueTest, canMoveCallbackBackwardsInTime) {
CountingCallback cb0(mDispatch);
auto result =
- mDispatch->schedule(cb0,
- {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(500, *result);
- result = mDispatch->schedule(cb0,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+ result = mDispatch.schedule(cb0,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
}
@@ -758,14 +745,14 @@
EXPECT_CALL(mMockClock, alarmAt(_, 500));
CountingCallback cb(mDispatch);
auto result =
- mDispatch->schedule(cb,
- {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(500, *result);
mMockClock.advanceBy(400);
- result = mDispatch->schedule(cb,
- {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
+ result = mDispatch.schedule(cb,
+ {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1200, *result);
advanceToNextCallback();
@@ -773,19 +760,19 @@
}
TEST_F(VSyncDispatchTimerQueueTest, targetOffsetMovingBackALittleCanStillSchedule) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
.Times(2)
.WillOnce(Return(1000))
.WillOnce(Return(1002));
CountingCallback cb(mDispatch);
auto result =
- mDispatch->schedule(cb,
- {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(500, *result);
mMockClock.advanceBy(400);
- result = mDispatch->schedule(cb,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ result = mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(602, *result);
}
@@ -793,13 +780,13 @@
TEST_F(VSyncDispatchTimerQueueTest, canScheduleNegativeOffsetAgainstDifferentPeriods) {
CountingCallback cb0(mDispatch);
auto result =
- mDispatch->schedule(cb0,
- {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(500, *result);
advanceToNextCallback();
- result = mDispatch->schedule(cb0,
- {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb0,
+ {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
}
@@ -810,13 +797,13 @@
EXPECT_CALL(mMockClock, alarmAt(_, 1100)).InSequence(seq);
CountingCallback cb0(mDispatch);
auto result =
- mDispatch->schedule(cb0,
- {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb0,
+ {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(500, *result);
advanceToNextCallback();
- result = mDispatch->schedule(cb0,
- {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb0,
+ {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1100, *result);
}
@@ -826,13 +813,13 @@
CountingCallback cb(mDispatch);
auto result =
- mDispatch->schedule(cb,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
- result = mDispatch->schedule(cb,
- {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+ result = mDispatch.schedule(cb,
+ {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
@@ -878,16 +865,16 @@
CountingCallback cb2(mDispatch);
auto result =
- mDispatch->schedule(cb1,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- result = mDispatch->schedule(cb2,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1900, *result);
mMockClock.advanceBy(80);
@@ -906,16 +893,16 @@
CountingCallback cb(mDispatch);
auto result =
- mDispatch->schedule(cb,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- result = mDispatch->schedule(cb,
- {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb,
+ {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1630, *result);
mMockClock.advanceBy(80);
@@ -932,19 +919,19 @@
CountingCallback cb2(mDispatch);
auto result =
- mDispatch->schedule(cb1,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
- result = mDispatch->schedule(cb2,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1900, *result);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- EXPECT_EQ(mDispatch->cancel(cb2), CancelResult::Cancelled);
+ EXPECT_EQ(mDispatch.cancel(cb2), CancelResult::Cancelled);
mMockClock.advanceBy(80);
@@ -961,19 +948,19 @@
CountingCallback cb2(mDispatch);
auto result =
- mDispatch->schedule(cb1,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
- result = mDispatch->schedule(cb2,
- {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+ result = mDispatch.schedule(cb2,
+ {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(1900, *result);
mMockClock.setLag(100);
mMockClock.advanceBy(620);
- EXPECT_EQ(mDispatch->cancel(cb1), CancelResult::Cancelled);
+ EXPECT_EQ(mDispatch.cancel(cb1), CancelResult::Cancelled);
EXPECT_THAT(cb1.mCalls.size(), Eq(0));
EXPECT_THAT(cb2.mCalls.size(), Eq(0));
@@ -988,21 +975,21 @@
CountingCallback cb2(mDispatch);
Sequence seq;
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
.InSequence(seq)
.WillOnce(Return(1000));
EXPECT_CALL(mMockClock, alarmAt(_, 600)).InSequence(seq);
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
.InSequence(seq)
.WillOnce(Return(1000));
auto result =
- mDispatch->schedule(cb1,
- {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb1,
+ {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(600, *result);
- result = mDispatch->schedule(cb2,
- {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
+ result = mDispatch.schedule(cb2,
+ {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(610, *result);
@@ -1024,10 +1011,10 @@
EXPECT_CALL(mMockClock, alarmAt(_, 900));
CountingCallback cb(mDispatch);
- const auto result = mDispatch->schedule(cb,
- {.workDuration = 70,
- .readyDuration = 30,
- .earliestVsync = intended});
+ const auto result = mDispatch.schedule(cb,
+ {.workDuration = 70,
+ .readyDuration = 30,
+ .earliestVsync = intended});
EXPECT_TRUE(result.has_value());
EXPECT_EQ(900, *result);
advanceToNextCallback();
@@ -1046,8 +1033,8 @@
CountingCallback cb(mDispatch);
- mDispatch->schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
- mDispatch->schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+ mDispatch.schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
advanceToNextCallback();
@@ -1065,8 +1052,7 @@
protected:
nsecs_t const mPeriod = 1000;
nsecs_t const mVsyncMoveThreshold = 200;
- std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
- std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
+ NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
};
TEST_F(VSyncDispatchTimerQueueEntryTest, stateAfterInitialization) {
@@ -1084,7 +1070,7 @@
EXPECT_FALSE(entry.wakeupTime());
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
@@ -1098,7 +1084,7 @@
auto const duration = 500;
auto const now = 8750;
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + duration))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + duration))
.Times(1)
.WillOnce(Return(10000));
VSyncDispatchTimerQueueEntry entry(
@@ -1106,7 +1092,7 @@
EXPECT_FALSE(entry.wakeupTime());
EXPECT_TRUE(entry.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 994},
- *mStubTracker.get(), now)
+ mStubTracker, now)
.has_value());
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
@@ -1129,7 +1115,7 @@
mVsyncMoveThreshold);
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
@@ -1151,7 +1137,7 @@
}
TEST_F(VSyncDispatchTimerQueueEntryTest, updateCallback) {
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
.Times(2)
.WillOnce(Return(1000))
.WillOnce(Return(1020));
@@ -1160,17 +1146,17 @@
"test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_FALSE(entry.wakeupTime());
- entry.update(*mStubTracker.get(), 0);
+ entry.update(mStubTracker, 0);
EXPECT_FALSE(entry.wakeupTime());
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
auto wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(wakeup, Eq(900));
- entry.update(*mStubTracker.get(), 0);
+ entry.update(mStubTracker, 0);
wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
EXPECT_THAT(*wakeup, Eq(920));
@@ -1180,9 +1166,9 @@
VSyncDispatchTimerQueueEntry entry(
"test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
- entry.update(*mStubTracker.get(), 0);
+ entry.update(mStubTracker, 0);
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
@@ -1193,24 +1179,24 @@
VSyncDispatchTimerQueueEntry entry(
"test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
entry.executing(); // 1000 is executing
// had 1000 not been executing, this could have been scheduled for time 800.
EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
EXPECT_THAT(*entry.readyTime(), Eq(2000));
EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_THAT(*entry.wakeupTime(), Eq(1950));
EXPECT_THAT(*entry.readyTime(), Eq(2000));
EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 1001},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
EXPECT_THAT(*entry.readyTime(), Eq(2000));
@@ -1222,24 +1208,24 @@
"test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
Sequence seq;
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
.InSequence(seq)
.WillOnce(Return(1000));
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
.InSequence(seq)
.WillOnce(Return(1000));
- EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
.InSequence(seq)
.WillOnce(Return(2000));
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
entry.executing(); // 1000 is executing
EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
}
@@ -1247,16 +1233,16 @@
VSyncDispatchTimerQueueEntry entry(
"test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
EXPECT_TRUE(entry.schedule({.workDuration = 1200, .readyDuration = 0, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
}
@@ -1269,7 +1255,7 @@
entry.addPendingWorkloadUpdate(
{.workDuration = effectualOffset, .readyDuration = 0, .earliestVsync = 400});
EXPECT_TRUE(entry.hasPendingWorkloadUpdate());
- entry.update(*mStubTracker.get(), 0);
+ entry.update(mStubTracker, 0);
EXPECT_FALSE(entry.hasPendingWorkloadUpdate());
EXPECT_THAT(*entry.wakeupTime(), Eq(mPeriod - effectualOffset));
}
@@ -1290,7 +1276,7 @@
mVsyncMoveThreshold);
EXPECT_TRUE(entry.schedule({.workDuration = 70, .readyDuration = 30, .earliestVsync = 500},
- *mStubTracker.get(), 0)
+ mStubTracker, 0)
.has_value());
auto const wakeup = entry.wakeupTime();
ASSERT_TRUE(wakeup);
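The hunks above consistently replace mDispatch->schedule(...) with mDispatch.schedule(...) and *mStubTracker.get() with mStubTracker, i.e. the fixture is assumed to now hold the dispatch object and the tracker mock by value instead of behind pointers. A minimal sketch of the two fixture styles, using a hypothetical FakeTracker interface rather than the real MockVSyncTracker:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <memory>

    // Hypothetical stand-in for the real VSyncTracker interface.
    struct Tracker {
        virtual ~Tracker() = default;
        virtual long nextAnticipatedVSyncTimeFrom(long timePoint) = 0;
    };

    struct FakeTracker : Tracker {
        MOCK_METHOD1(nextAnticipatedVSyncTimeFrom, long(long));
    };

    class PointerFixture : public testing::Test {
    protected:
        // Old style: mock on the heap; expectations go through *mTracker.get().
        std::shared_ptr<testing::NiceMock<FakeTracker>> mTracker =
                std::make_shared<testing::NiceMock<FakeTracker>>();
    };

    class ValueFixture : public testing::Test {
    protected:
        // New style: mock held by value; expectations are set directly on the member.
        testing::NiceMock<FakeTracker> mTracker;
    };

    TEST_F(ValueFixture, expectationOnValueMember) {
        EXPECT_CALL(mTracker, nextAnticipatedVSyncTimeFrom(1000)).WillOnce(testing::Return(2000));
        EXPECT_EQ(2000, mTracker.nextAnticipatedVSyncTimeFrom(1000));
    }

Holding the mock by value keeps ownership inside the fixture and lets EXPECT_CALL take the member directly, which is why every call site in the hunks above drops the dereference.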
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index 7947a5e..3095e8a 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -55,7 +55,7 @@
static constexpr size_t kOutlierTolerancePercent = 25;
static constexpr nsecs_t mMaxRoundingError = 100;
- VSyncPredictor tracker{"tracker", mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+ VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
kOutlierTolerancePercent};
};
@@ -376,8 +376,7 @@
// See b/151146131
TEST_F(VSyncPredictorTest, hasEnoughPrecision) {
- VSyncPredictor tracker{"tracker", mPeriod, 20, kMinimumSamplesForPrediction,
- kOutlierTolerancePercent};
+ VSyncPredictor tracker{mPeriod, 20, kMinimumSamplesForPrediction, kOutlierTolerancePercent};
std::vector<nsecs_t> const simulatedVsyncs{840873348817, 840890049444, 840906762675,
840923581635, 840940161584, 840956868096,
840973702473, 840990256277, 841007116851,
diff --git a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
index a2de136..1fb2709 100644
--- a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
@@ -96,8 +96,8 @@
VSyncReactorTest()
: mMockTracker(std::make_shared<NiceMock<MockVSyncTracker>>()),
mMockClock(std::make_shared<NiceMock<MockClock>>()),
- mReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
- kPendingLimit, false /* supportKernelIdleTimer */) {
+ mReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker, kPendingLimit,
+ false /* supportKernelIdleTimer */) {
ON_CALL(*mMockClock, now()).WillByDefault(Return(mFakeNow));
ON_CALL(*mMockTracker, currentPeriod()).WillByDefault(Return(period));
}
@@ -192,7 +192,7 @@
mReactor.setIgnorePresentFences(true);
nsecs_t const newPeriod = 5000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
@@ -205,7 +205,7 @@
TEST_F(VSyncReactorTest, setPeriodCalledOnceConfirmedChange) {
nsecs_t const newPeriod = 5000;
EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
bool periodFlushed = true;
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(10000, std::nullopt, &periodFlushed));
@@ -224,7 +224,7 @@
TEST_F(VSyncReactorTest, changingPeriodBackAbortsConfirmationProcess) {
nsecs_t sampleTime = 0;
nsecs_t const newPeriod = 5000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
bool periodFlushed = true;
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
@@ -232,7 +232,7 @@
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
- mReactor.startPeriodTransition(period, false);
+ mReactor.startPeriodTransition(period);
EXPECT_FALSE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
}
@@ -242,13 +242,13 @@
nsecs_t const secondPeriod = 5000;
nsecs_t const thirdPeriod = 2000;
- mReactor.startPeriodTransition(secondPeriod, false);
+ mReactor.startPeriodTransition(secondPeriod);
bool periodFlushed = true;
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
- mReactor.startPeriodTransition(thirdPeriod, false);
+ mReactor.startPeriodTransition(thirdPeriod);
EXPECT_TRUE(
mReactor.addHwVsyncTimestamp(sampleTime += secondPeriod, std::nullopt, &periodFlushed));
EXPECT_FALSE(periodFlushed);
@@ -289,14 +289,14 @@
TEST_F(VSyncReactorTest, presentFenceAdditionDoesNotInterruptConfirmationProcess) {
nsecs_t const newPeriod = 5000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
EXPECT_TRUE(mReactor.addPresentFence(generateSignalledFenceWithTime(0)));
}
TEST_F(VSyncReactorTest, setPeriodCalledFirstTwoEventsNewPeriod) {
nsecs_t const newPeriod = 5000;
EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
bool periodFlushed = true;
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(5000, std::nullopt, &periodFlushed));
@@ -321,7 +321,7 @@
bool periodFlushed = false;
nsecs_t const newPeriod = 4000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
auto time = 0;
auto constexpr numTimestampSubmissions = 10;
@@ -346,7 +346,7 @@
bool periodFlushed = false;
nsecs_t const newPeriod = 4000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
auto time = 0;
// If the power mode is not DOZE or DOZE_SUSPEND, it is still collecting timestamps.
@@ -363,7 +363,7 @@
auto time = 0;
bool periodFlushed = false;
nsecs_t const newPeriod = 4000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
time += period;
mReactor.addHwVsyncTimestamp(time, std::nullopt, &periodFlushed);
@@ -379,7 +379,7 @@
auto time = 0;
bool periodFlushed = false;
nsecs_t const newPeriod = 4000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
static auto constexpr numSamplesWithNewPeriod = 4;
Sequence seq;
@@ -406,7 +406,7 @@
auto time = 0;
bool periodFlushed = false;
nsecs_t const newPeriod = 4000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
Sequence seq;
EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -426,7 +426,7 @@
nsecs_t const newPeriod1 = 4000;
nsecs_t const newPeriod2 = 7000;
- mReactor.startPeriodTransition(newPeriod1, false);
+ mReactor.startPeriodTransition(newPeriod1);
Sequence seq;
EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -445,7 +445,7 @@
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
- mReactor.startPeriodTransition(newPeriod2, false);
+ mReactor.startPeriodTransition(newPeriod2);
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
@@ -458,7 +458,7 @@
mReactor.setIgnorePresentFences(true);
nsecs_t const newPeriod = 5000;
- mReactor.startPeriodTransition(newPeriod, false);
+ mReactor.startPeriodTransition(newPeriod);
EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
EXPECT_FALSE(periodFlushed);
@@ -472,9 +472,8 @@
TEST_F(VSyncReactorTest, periodIsMeasuredIfIgnoringComposer) {
// Create a reactor which supports the kernel idle timer
- auto idleReactor =
- VSyncReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
- kPendingLimit, true /* supportKernelIdleTimer */);
+ auto idleReactor = VSyncReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
+ kPendingLimit, true /* supportKernelIdleTimer */);
bool periodFlushed = true;
EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(4);
@@ -482,7 +481,7 @@
// First, set the same period, which should only be confirmed when we receive two
// matching callbacks
- idleReactor.startPeriodTransition(10000, false);
+ idleReactor.startPeriodTransition(10000);
EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
EXPECT_FALSE(periodFlushed);
// Correct period but incorrect timestamp delta
@@ -495,7 +494,7 @@
// Then, set a new period, which should be confirmed as soon as we receive a callback
// reporting the new period
nsecs_t const newPeriod = 5000;
- idleReactor.startPeriodTransition(newPeriod, false);
+ idleReactor.startPeriodTransition(newPeriod);
// Incorrect timestamp delta and period
EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(20000, 10000, &periodFlushed));
EXPECT_FALSE(periodFlushed);
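In the VSyncReactorTest.cpp hunks above, both the VSyncReactor constructor and startPeriodTransition lose an argument (the leading name string and the trailing bool, respectively), so every construction and call site in the tests shrinks by one argument. A rough sketch of that shape, with a hypothetical Reactor class standing in for the real VSyncReactor:

    #include <cstddef>
    #include <cstdint>
    #include <memory>

    using nsecs_t = int64_t;

    // Hypothetical clock interface, only to make the sketch self-contained.
    struct Clock {
        virtual ~Clock() = default;
        virtual nsecs_t now() const = 0;
    };

    class Reactor {
    public:
        // After the change: no name/trace-tag parameter.
        Reactor(std::unique_ptr<Clock> clock, size_t pendingLimit, bool supportKernelIdleTimer)
              : mClock(std::move(clock)),
                mPendingLimit(pendingLimit),
                mSupportKernelIdleTimer(supportKernelIdleTimer) {}

        // After the change: the transition takes only the new period, no bool flag.
        void startPeriodTransition(nsecs_t period) { mPendingPeriod = period; }

    private:
        std::unique_ptr<Clock> mClock;
        size_t mPendingLimit;
        bool mSupportKernelIdleTimer;
        nsecs_t mPendingPeriod = 0;
    };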
diff --git a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
index 3a6068a..f8567bd 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
@@ -29,29 +29,27 @@
EventThread();
~EventThread() override;
- MOCK_METHOD(sp<EventThreadConnection>, createEventConnection,
- (ResyncCallback, EventRegistrationFlags), (const, override));
- MOCK_METHOD(void, onScreenReleased, (), (override));
- MOCK_METHOD(void, onScreenAcquired, (), (override));
- MOCK_METHOD(void, onHotplugReceived, (PhysicalDisplayId, bool), (override));
- MOCK_METHOD(void, onModeChanged, (const scheduler::FrameRateMode&), (override));
- MOCK_METHOD(void, onFrameRateOverridesChanged,
- (PhysicalDisplayId, std::vector<FrameRateOverride>), (override));
- MOCK_METHOD(void, dump, (std::string&), (const, override));
- MOCK_METHOD(void, setDuration,
- (std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration),
- (override));
- MOCK_METHOD(status_t, registerDisplayEventConnection,
- (const sp<android::EventThreadConnection>&), (override));
- MOCK_METHOD(void, setVsyncRate, (uint32_t, const sp<android::EventThreadConnection>&),
- (override));
- MOCK_METHOD(void, requestNextVsync, (const sp<android::EventThreadConnection>&), (override));
+ MOCK_CONST_METHOD2(createEventConnection,
+ sp<EventThreadConnection>(ResyncCallback, EventRegistrationFlags));
+ MOCK_METHOD0(onScreenReleased, void());
+ MOCK_METHOD0(onScreenAcquired, void());
+ MOCK_METHOD2(onHotplugReceived, void(PhysicalDisplayId, bool));
+ MOCK_METHOD1(onModeChanged, void(const scheduler::FrameRateMode &));
+ MOCK_METHOD2(onFrameRateOverridesChanged,
+ void(PhysicalDisplayId, std::vector<FrameRateOverride>));
+ MOCK_CONST_METHOD1(dump, void(std::string&));
+ MOCK_METHOD2(setDuration,
+ void(std::chrono::nanoseconds workDuration,
+ std::chrono::nanoseconds readyDuration));
+ MOCK_METHOD1(registerDisplayEventConnection,
+ status_t(const sp<android::EventThreadConnection> &));
+ MOCK_METHOD2(setVsyncRate, void(uint32_t, const sp<android::EventThreadConnection> &));
+ MOCK_METHOD1(requestNextVsync, void(const sp<android::EventThreadConnection> &));
MOCK_METHOD(VsyncEventData, getLatestVsyncEventData,
- (const sp<android::EventThreadConnection>&), (const, override));
- MOCK_METHOD(void, requestLatestConfig, (const sp<android::EventThreadConnection>&));
- MOCK_METHOD(void, pauseVsyncCallback, (bool));
- MOCK_METHOD(size_t, getEventThreadConnectionCount, (), (override));
- MOCK_METHOD(void, onNewVsyncSchedule, (std::shared_ptr<scheduler::VsyncSchedule>), (override));
+ (const sp<android::EventThreadConnection> &), (const));
+ MOCK_METHOD1(requestLatestConfig, void(const sp<android::EventThreadConnection> &));
+ MOCK_METHOD1(pauseVsyncCallback, void(bool));
+ MOCK_METHOD0(getEventThreadConnectionCount, size_t());
};
} // namespace android::mock
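The MockEventThread.h hunk above rewrites the mock declarations from the single variadic MOCK_METHOD macro back to the legacy MOCK_METHODn / MOCK_CONST_METHODn family. A minimal sketch of the two equivalent spellings, using a hypothetical Thread interface rather than the real EventThread:

    #include <gmock/gmock.h>
    #include <string>

    // Hypothetical interface; only the macro spellings matter here.
    struct Thread {
        virtual ~Thread() = default;
        virtual void dump(std::string& out) const = 0;
        virtual int setRate(unsigned rate, bool force) = 0;
    };

    // Newer style (gMock 1.10+): one macro, with qualifiers in the trailing list.
    struct NewStyleMock : Thread {
        MOCK_METHOD(void, dump, (std::string&), (const, override));
        MOCK_METHOD(int, setRate, (unsigned, bool), (override));
    };

    // Legacy style: arity and constness are encoded in the macro name, and the
    // signature is written as a single function type.
    struct LegacyStyleMock : Thread {
        MOCK_CONST_METHOD1(dump, void(std::string&));
        MOCK_METHOD2(setRate, int(unsigned, bool));
    };

Both forms generate the same mock behavior; the legacy macros simply cannot append override, which is why the + lines above drop those qualifiers.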
diff --git a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
index a8eca21..7d4b159 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
@@ -18,19 +18,19 @@
#include <gmock/gmock.h>
-#include "Scheduler/ISchedulerCallback.h"
+#include "Scheduler/Scheduler.h"
namespace android::scheduler::mock {
struct SchedulerCallback final : ISchedulerCallback {
- MOCK_METHOD(void, setVsyncEnabled, (PhysicalDisplayId, bool), (override));
+ MOCK_METHOD(void, setVsyncEnabled, (bool), (override));
MOCK_METHOD(void, requestDisplayModes, (std::vector<display::DisplayModeRequest>), (override));
MOCK_METHOD(void, kernelTimerChanged, (bool), (override));
MOCK_METHOD(void, triggerOnFrameRateOverridesChanged, (), (override));
};
struct NoOpSchedulerCallback final : ISchedulerCallback {
- void setVsyncEnabled(PhysicalDisplayId, bool) override {}
+ void setVsyncEnabled(bool) override {}
void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
void kernelTimerChanged(bool) override {}
void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
index 69ec60a..4ef91da 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
@@ -28,12 +28,12 @@
~VsyncController() override;
MOCK_METHOD(bool, addPresentFence, (std::shared_ptr<FenceTime>), (override));
- MOCK_METHOD(bool, addHwVsyncTimestamp, (nsecs_t, std::optional<nsecs_t>, bool*), (override));
- MOCK_METHOD(void, startPeriodTransition, (nsecs_t, bool), (override));
- MOCK_METHOD(void, setIgnorePresentFences, (bool), (override));
+ MOCK_METHOD3(addHwVsyncTimestamp, bool(nsecs_t, std::optional<nsecs_t>, bool*));
+ MOCK_METHOD1(startPeriodTransition, void(nsecs_t));
+ MOCK_METHOD1(setIgnorePresentFences, void(bool));
MOCK_METHOD(void, setDisplayPowerMode, (hal::PowerMode), (override));
- MOCK_METHOD(void, dump, (std::string&), (const, override));
+ MOCK_CONST_METHOD1(dump, void(std::string&));
};
} // namespace android::mock
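As the MockVsyncController.h hunk shows, a mock's declared signature has to track the interface exactly: once startPeriodTransition takes a single nsecs_t, the mock drops to MOCK_METHOD1 and the expectations in VSyncReactorTest.cpp match on one argument. A minimal, self-contained sketch with a hypothetical Controller interface standing in for the real VsyncController:

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <cstdint>

    using nsecs_t = int64_t;

    // Hypothetical stand-in for the real VsyncController interface.
    struct Controller {
        virtual ~Controller() = default;
        virtual void startPeriodTransition(nsecs_t period) = 0;
    };

    struct MockController : Controller {
        // Legacy macro whose function type mirrors the one-argument interface.
        MOCK_METHOD1(startPeriodTransition, void(nsecs_t));
    };

    TEST(ControllerSketch, expectationMatchesSlimmedSignature) {
        MockController controller;
        EXPECT_CALL(controller, startPeriodTransition(4000)).Times(1);
        controller.startPeriodTransition(4000);  // previously startPeriodTransition(4000, false)
    }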