Merge "CE: Unit test coverage for Output::finishFrame"
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 4026f29..e6e232c 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -92,10 +92,6 @@
static constexpr const char* CACHE_DIR_POSTFIX = "/cache";
static constexpr const char* CODE_CACHE_DIR_POSTFIX = "/code_cache";
-static constexpr const char *kIdMapPath = "/system/bin/idmap";
-static constexpr const char* IDMAP_PREFIX = "/data/resource-cache/";
-static constexpr const char* IDMAP_SUFFIX = "@idmap";
-
// fsverity assumes the page size is always 4096. If not, the feature can not be
// enabled.
static constexpr int kVerityPageSize = 4096;
@@ -2253,206 +2249,6 @@
return res;
}
-static void run_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--fd", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static void run_verify_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--verify", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static bool delete_stale_idmap(const char* target_apk, const char* overlay_apk,
- const char* idmap_path, int32_t uid) {
- int idmap_fd = open(idmap_path, O_RDWR);
- if (idmap_fd < 0) {
- PLOG(ERROR) << "idmap open failed: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- LOG(ERROR) << "setgid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (setuid(uid) != 0) {
- LOG(ERROR) << "setuid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- PLOG(ERROR) << "flock(" << idmap_path << ") failed during idmap";
- exit(1);
- }
-
- run_verify_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to deleting stale idmap failed */
- } else {
- int status = wait_child(pid);
- close(idmap_fd);
-
- if (status != 0) {
- // Failed on verifying if idmap is made from target_apk and overlay_apk.
- LOG(DEBUG) << "delete stale idmap: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
- }
- return false;
-}
-
-// Transform string /a/b/c.apk to (prefix)/a@b@c.apk@(suffix)
-// eg /a/b/c.apk to /data/resource-cache/a@b@c.apk@idmap
-static int flatten_path(const char *prefix, const char *suffix,
- const char *overlay_path, char *idmap_path, size_t N)
-{
- if (overlay_path == nullptr || idmap_path == nullptr) {
- return -1;
- }
- const size_t len_overlay_path = strlen(overlay_path);
- // will access overlay_path + 1 further below; requires absolute path
- if (len_overlay_path < 2 || *overlay_path != '/') {
- return -1;
- }
- const size_t len_idmap_root = strlen(prefix);
- const size_t len_suffix = strlen(suffix);
- if (SIZE_MAX - len_idmap_root < len_overlay_path ||
- SIZE_MAX - (len_idmap_root + len_overlay_path) < len_suffix) {
- // additions below would cause overflow
- return -1;
- }
- if (N < len_idmap_root + len_overlay_path + len_suffix) {
- return -1;
- }
- memset(idmap_path, 0, N);
- snprintf(idmap_path, N, "%s%s%s", prefix, overlay_path + 1, suffix);
- char *ch = idmap_path + len_idmap_root;
- while (*ch != '\0') {
- if (*ch == '/') {
- *ch = '@';
- }
- ++ch;
- }
- return 0;
-}
-
-binder::Status InstalldNativeService::idmap(const std::string& targetApkPath,
- const std::string& overlayApkPath, int32_t uid) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(targetApkPath);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* target_apk = targetApkPath.c_str();
- const char* overlay_apk = overlayApkPath.c_str();
- ALOGV("idmap target_apk=%s overlay_apk=%s uid=%d\n", target_apk, overlay_apk, uid);
-
- int idmap_fd = -1;
- char idmap_path[PATH_MAX];
- struct stat idmap_stat;
- bool outdated = false;
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- goto fail;
- }
-
- if (stat(idmap_path, &idmap_stat) < 0) {
- outdated = true;
- } else {
- outdated = delete_stale_idmap(target_apk, overlay_apk, idmap_path, uid);
- }
-
- if (outdated) {
- idmap_fd = open(idmap_path, O_RDWR | O_CREAT | O_EXCL, 0644);
- } else {
- idmap_fd = open(idmap_path, O_RDWR);
- }
-
- if (idmap_fd < 0) {
- ALOGE("idmap cannot open '%s' for output: %s\n", idmap_path, strerror(errno));
- goto fail;
- }
- if (fchown(idmap_fd, AID_SYSTEM, uid) < 0) {
- ALOGE("idmap cannot chown '%s'\n", idmap_path);
- goto fail;
- }
- if (fchmod(idmap_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) < 0) {
- ALOGE("idmap cannot chmod '%s'\n", idmap_path);
- goto fail;
- }
-
- if (!outdated) {
- close(idmap_fd);
- return ok();
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- ALOGE("setgid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (setuid(uid) != 0) {
- ALOGE("setuid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- ALOGE("flock(%s) failed during idmap: %s\n", idmap_path, strerror(errno));
- exit(1);
- }
-
- run_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to idmap failed */
- } else {
- int status = wait_child(pid);
- if (status != 0) {
- ALOGE("idmap failed, status=0x%04x\n", status);
- goto fail;
- }
- }
-
- close(idmap_fd);
- return ok();
-fail:
- if (idmap_fd >= 0) {
- close(idmap_fd);
- unlink(idmap_path);
- }
- return error();
-}
-
-binder::Status InstalldNativeService::removeIdmap(const std::string& overlayApkPath) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* overlay_apk = overlayApkPath.c_str();
- char idmap_path[PATH_MAX];
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- return error();
- }
- if (unlink(idmap_path) < 0) {
- ALOGE("couldn't unlink idmap file %s\n", idmap_path);
- return error();
- }
- return ok();
-}
-
binder::Status InstalldNativeService::restoreconAppData(const std::unique_ptr<std::string>& uuid,
const std::string& packageName, int32_t userId, int32_t flags, int32_t appId,
const std::string& seInfo) {
diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h
index 2b7bf33..ef91bf8 100644
--- a/cmds/installd/InstalldNativeService.h
+++ b/cmds/installd/InstalldNativeService.h
@@ -119,9 +119,6 @@
binder::Status destroyProfileSnapshot(const std::string& packageName,
const std::string& profileName);
- binder::Status idmap(const std::string& targetApkPath, const std::string& overlayApkPath,
- int32_t uid);
- binder::Status removeIdmap(const std::string& overlayApkPath);
binder::Status rmPackageDir(const std::string& packageDir);
binder::Status markBootComplete(const std::string& instructionSet);
binder::Status freeCache(const std::unique_ptr<std::string>& uuid, int64_t targetFreeBytes,
diff --git a/cmds/installd/binder/android/os/IInstalld.aidl b/cmds/installd/binder/android/os/IInstalld.aidl
index d99bcc8..6cc4bde 100644
--- a/cmds/installd/binder/android/os/IInstalld.aidl
+++ b/cmds/installd/binder/android/os/IInstalld.aidl
@@ -72,8 +72,6 @@
@utf8InCpp String profileName, @utf8InCpp String classpath);
void destroyProfileSnapshot(@utf8InCpp String packageName, @utf8InCpp String profileName);
- void idmap(@utf8InCpp String targetApkPath, @utf8InCpp String overlayApkPath, int uid);
- void removeIdmap(@utf8InCpp String overlayApkPath);
void rmPackageDir(@utf8InCpp String packageDir);
void markBootComplete(@utf8InCpp String instructionSet);
void freeCache(@nullable @utf8InCpp String uuid, long targetFreeBytes,
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 616c3b2..f95e445 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -339,6 +339,10 @@
? "dalvik.vm.dex2oat-threads"
: "dalvik.vm.boot-dex2oat-threads";
std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s");
+ const char* cpu_set_property = post_bootcomplete
+ ? "dalvik.vm.dex2oat-cpu-set"
+ : "dalvik.vm.boot-dex2oat-cpu-set";
+ std::string dex2oat_cpu_set_arg = MapPropertyToArg(cpu_set_property, "--cpu-set=%s");
std::string bootclasspath;
char* dex2oat_bootclasspath = getenv("DEX2OATBOOTCLASSPATH");
@@ -518,6 +522,7 @@
AddArg(image_block_size_arg);
AddArg(dex2oat_compiler_filter_arg);
AddArg(dex2oat_threads_arg);
+ AddArg(dex2oat_cpu_set_arg);
AddArg(dex2oat_swap_fd);
AddArg(dex2oat_image_fd);
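Note: the hunk above threads a new dalvik.vm cpu-set property through to dex2oat as a "--cpu-set=" flag, using the same MapPropertyToArg pattern as the existing thread-count argument. A rough sketch of that property-to-flag mapping, using android-base's GetProperty rather than installd's helper (the function name below is made up):

    #include <android-base/properties.h>
    #include <android-base/stringprintf.h>
    #include <string>

    // Returns "--cpu-set=<value>" when the property is set, or an empty string
    // otherwise, so an unset property contributes no dex2oat argument.
    static std::string MapCpuSetPropertyToArg(bool post_bootcomplete) {
        const char* property = post_bootcomplete ? "dalvik.vm.dex2oat-cpu-set"
                                                 : "dalvik.vm.boot-dex2oat-cpu-set";
        std::string value = android::base::GetProperty(property, /*default_value=*/"");
        return value.empty() ? "" : android::base::StringPrintf("--cpu-set=%s", value.c_str());
    }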
diff --git a/cmds/installd/migrate_legacy_obb_data.sh b/cmds/installd/migrate_legacy_obb_data.sh
index 0e6d7b9..7399681 100644
--- a/cmds/installd/migrate_legacy_obb_data.sh
+++ b/cmds/installd/migrate_legacy_obb_data.sh
@@ -15,17 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-rm -rf /data/media/Android/obb/test_probe
-mkdir -p /data/media/Android/obb/
-touch /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
+mkdir -p /data/media/0/Android/obb/
+touch /data/media/0/Android/obb/test_probe
if ! test -f /data/media/0/Android/obb/test_probe ; then
log -p i -t migrate_legacy_obb_data "No support for 'unshared_obb'. Not migrating"
- rm -rf /data/media/Android/obb/test_probe
+ rm -rf /data/media/0/Android/obb/test_probe
exit 0
fi
# Delete the test file, and remove the obb folder if it is empty
-rm -rf /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
rmdir /data/media/obb
if ! test -d /data/media/obb ; then
diff --git a/cmds/installd/otapreopt.cpp b/cmds/installd/otapreopt.cpp
index db36ce3..eefbe4f 100644
--- a/cmds/installd/otapreopt.cpp
+++ b/cmds/installd/otapreopt.cpp
@@ -480,6 +480,10 @@
"-j",
false,
cmd);
+ AddCompilerOptionFromSystemProperty("dalvik.vm.image-dex2oat-cpu-set",
+ "--cpu-set=",
+ false,
+ cmd);
AddCompilerOptionFromSystemProperty(
StringPrintf("dalvik.vm.isa.%s.variant", isa).c_str(),
"--instruction-set-variant=",
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 861401c..141171b 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -68,7 +68,15 @@
}
#endif // !VENDORSERVICEMANAGER
-ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {}
+ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
+#ifndef VENDORSERVICEMANAGER
+ // can process these at any time, don't want to delay first VINTF client
+ std::thread([] {
+ vintf::VintfObject::GetDeviceHalManifest();
+ vintf::VintfObject::GetFrameworkHalManifest();
+ }).detach();
+#endif // !VENDORSERVICEMANAGER
+}
ServiceManager::~ServiceManager() {
// this should only happen in tests
@@ -306,7 +314,7 @@
if (listeners.empty()) {
*it = mNameToCallback.erase(*it);
} else {
- it++;
+ (*it)++;
}
}
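Note: the constructor change above pre-warms the VINTF HAL manifests on a detached thread so the first client that needs a VINTF check does not pay the parsing cost. A minimal, framework-free sketch of that warm-up-in-the-constructor pattern (Manager/getDeviceManifest are illustrative names, not the servicemanager API):

    #include <chrono>
    #include <string>
    #include <thread>

    // Stand-in for an expensive, internally cached lookup (e.g. parsing a manifest once).
    static const std::string& getDeviceManifest() {
        static const std::string manifest = [] {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));  // simulate parsing
            return std::string("device manifest");
        }();
        return manifest;
    }

    class Manager {
    public:
        Manager() {
            // Warm the cache in the background so the first real caller does not block
            // on the parse; the function-local static is initialized exactly once.
            std::thread([] { getDeviceManifest(); }).detach();
        }
    };

    int main() {
        Manager manager;      // kicks off the warm-up
        getDeviceManifest();  // later callers hit the cached value
        return 0;
    }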
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index 4f47db1..bac8b66 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -280,19 +280,31 @@
std::condition_variable mCv;
};
+ // Simple RAII object to ensure a function call immediately before going out of scope
+ class Defer {
+ public:
+ Defer(std::function<void()>&& f) : mF(std::move(f)) {}
+ ~Defer() { mF(); }
+ private:
+ std::function<void()> mF;
+ };
+
const std::string name = String8(name16).c_str();
sp<IBinder> out;
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
sp<Waiter> waiter = new Waiter;
if (!mTheRealServiceManager->registerForNotifications(
name, waiter).isOk()) {
return nullptr;
}
+ Defer unregister ([&] {
+ mTheRealServiceManager->unregisterForNotifications(name, waiter);
+ });
while(true) {
{
@@ -316,7 +328,7 @@
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
ALOGW("Waited one second for %s", name.c_str());
}
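Note: the Defer helper added above is a small RAII guard: the callable it is constructed with runs when the guard leaves scope, which guarantees unregisterForNotifications() happens on every return path of getService(). A standalone sketch of the same idiom applied to a FILE handle:

    #include <cstdio>
    #include <functional>

    // Same shape as the Defer in the diff: run the stored callable at scope exit.
    class Defer {
    public:
        explicit Defer(std::function<void()>&& f) : mF(std::move(f)) {}
        ~Defer() { mF(); }
    private:
        std::function<void()> mF;
    };

    bool firstLineNonEmpty(const char* path) {
        FILE* f = fopen(path, "r");
        if (f == nullptr) return false;
        Defer closeFile([f] { fclose(f); });  // runs on every return path below

        char buf[64];
        if (fgets(buf, sizeof(buf), f) == nullptr) return false;  // file still closed
        return buf[0] != '\n';                                    // file still closed
    }

    int main() {
        return firstLineNonEmpty("/etc/hosts") ? 0 : 1;
    }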
diff --git a/libs/binder/Status.cpp b/libs/binder/Status.cpp
index 0ad99ce..674f065 100644
--- a/libs/binder/Status.cpp
+++ b/libs/binder/Status.cpp
@@ -232,9 +232,10 @@
ret.append("No error");
} else {
ret.appendFormat("Status(%d, %s): '", mException, exceptionToString(mException).c_str());
- if (mException == EX_SERVICE_SPECIFIC ||
- mException == EX_TRANSACTION_FAILED) {
+ if (mException == EX_SERVICE_SPECIFIC) {
ret.appendFormat("%d: ", mErrorCode);
+ } else if (mException == EX_TRANSACTION_FAILED) {
+ ret.appendFormat("%s: ", statusToString(mErrorCode).c_str());
}
ret.append(String8(mMessage));
ret.append("'");
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder.h b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
index 4d5c044..4560f22 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
@@ -34,6 +34,12 @@
#include <android/binder_status.h>
__BEGIN_DECLS
+
+#ifndef __ANDROID_API__
+#error Android builds must be compiled against a specific API. If this is an \
+ android platform host build, you must use libbinder_ndk_host_user.
+#endif
+
#if __ANDROID_API__ >= 29
// Also see TF_* in kernel's binder.h
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
index f3bc31b..7871667 100644
--- a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
@@ -441,6 +441,42 @@
}
/**
+ * Writes a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * to 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_writeStdVectorParcelableElement<ScopedFileDescriptor>(
+ AParcel* parcel, const void* vectorData, size_t index) {
+ const std::vector<ScopedFileDescriptor>* vector =
+ static_cast<const std::vector<ScopedFileDescriptor>*>(vectorData);
+ int writeFd = vector->at(index).get();
+ if (writeFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ return AParcel_writeParcelFileDescriptor(parcel, writeFd);
+}
+
+/**
+ * Reads a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * from 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_readStdVectorParcelableElement<ScopedFileDescriptor>(
+ const AParcel* parcel, void* vectorData, size_t index) {
+ std::vector<ScopedFileDescriptor>* vector =
+ static_cast<std::vector<ScopedFileDescriptor>*>(vectorData);
+ int readFd;
+ binder_status_t status = AParcel_readParcelFileDescriptor(parcel, &readFd);
+ if (status == STATUS_OK) {
+ if (readFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ vector->at(index).set(readFd);
+ }
+ return status;
+}
+
+/**
* Convenience API for writing a std::vector<P>
*/
template <typename P>
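Note: the two additions above are full specializations of the element read/write function templates for ScopedFileDescriptor, so the generic std::vector parcel helpers transparently pick up file-descriptor-aware (de)serialization. A toy, library-free sketch of that specialization-based dispatch (writeElement/writeVector are made-up names, not the NDK API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Plain { int value; };
    struct Fd { int fd; };

    // Generic per-element writer used by the generic vector helper below.
    template <typename T>
    void writeElement(const std::vector<T>& v, size_t index) {
        std::cout << "plain element #" << index << "\n";
        (void)v;
    }

    // Full specialization: vectors of Fd get descriptor-specific handling
    // without any change at the writeVector() call site.
    template <>
    void writeElement<Fd>(const std::vector<Fd>& v, size_t index) {
        std::cout << "file descriptor " << v.at(index).fd << "\n";
    }

    template <typename T>
    void writeVector(const std::vector<T>& v) {
        for (size_t i = 0; i < v.size(); ++i) writeElement<T>(v, i);
    }

    int main() {
        writeVector(std::vector<Plain>{{1}, {2}});  // generic path
        writeVector(std::vector<Fd>{{3}});          // specialized path
        return 0;
    }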
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index db4a36b..94ab9f0 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -1034,9 +1034,9 @@
binder_buffer_object obj {
.hdr = { .type = BINDER_TYPE_PTR },
+ .flags = 0,
.buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
.length = 4,
- .flags = 0,
};
data.setDataCapacity(1024);
// Write a bogus object at offset 0 to get an entry in the offset table
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 45fea85..4ee9f55 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -397,7 +397,7 @@
if (deleteMapEntry(gTisMapFd, &key) && errno != ENOENT) return false;
}
- concurrent_val_t czeros = {.policy = {0}, .active = {0}};
+ concurrent_val_t czeros = { .active = {0}, .policy = {0}, };
std::vector<concurrent_val_t> cvals(gNCpus, czeros);
for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
if (writeToMapEntry(gConcurrentMapFd, &key, cvals.data(), BPF_EXIST) && errno != ENOENT)
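Note: the one-line change above reorders the designated initializers; C++ (unlike C) requires designators to appear in the members' declaration order, so once active precedes policy in the struct the old {.policy, .active} spelling draws a compiler diagnostic. A minimal illustration (the struct layout here is assumed from the fix):

    #include <cstdint>

    struct concurrent_val_t {
        uint64_t active[8];
        uint64_t policy[8];
    };

    int main() {
        // OK: designators follow the declaration order (active, then policy).
        concurrent_val_t ok = {.active = {0}, .policy = {0}};

        // Rejected by C++ compilers (designators out of declaration order):
        // concurrent_val_t bad = {.policy = {0}, .active = {0}};
        return static_cast<int>(ok.active[0] + ok.policy[0]);
    }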
diff --git a/libs/nativewindow/Android.bp b/libs/nativewindow/Android.bp
index 27ab482..55400c7 100644
--- a/libs/nativewindow/Android.bp
+++ b/libs/nativewindow/Android.bp
@@ -85,6 +85,11 @@
export_header_lib_headers: [
"libnativebase_headers",
],
+
+ stubs: {
+ symbol_file: "libnativewindow.map.txt",
+ versions: ["29"],
+ },
}
llndk_library {
diff --git a/libs/nativewindow/libnativewindow.map.txt b/libs/nativewindow/libnativewindow.map.txt
index daf1dcc..f59e8f0 100644
--- a/libs/nativewindow/libnativewindow.map.txt
+++ b/libs/nativewindow/libnativewindow.map.txt
@@ -2,9 +2,9 @@
global:
AHardwareBuffer_acquire;
AHardwareBuffer_allocate;
- AHardwareBuffer_createFromHandle; # llndk
+ AHardwareBuffer_createFromHandle; # llndk # apex
AHardwareBuffer_describe;
- AHardwareBuffer_getNativeHandle; # llndk
+ AHardwareBuffer_getNativeHandle; # llndk # apex
AHardwareBuffer_isSupported; # introduced=29
AHardwareBuffer_lock;
AHardwareBuffer_lockAndGetInfo; # introduced=29
diff --git a/libs/nativewindow/tests/AHardwareBufferTest.cpp b/libs/nativewindow/tests/AHardwareBufferTest.cpp
index cc2731d..71b1f9f 100644
--- a/libs/nativewindow/tests/AHardwareBufferTest.cpp
+++ b/libs/nativewindow/tests/AHardwareBufferTest.cpp
@@ -20,6 +20,7 @@
#include <android/hardware_buffer.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <android/hardware/graphics/common/1.0/types.h>
+#include <vndk/hardware_buffer.h>
#include <gtest/gtest.h>
@@ -100,9 +101,33 @@
(uint64_t)BufferUsage::CPU_WRITE_RARELY,
AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
-EXPECT_TRUE(TestUsageConversion(
+ EXPECT_TRUE(TestUsageConversion(
(uint64_t)BufferUsage::GPU_RENDER_TARGET | (uint64_t)BufferUsage::GPU_TEXTURE |
- 1ull << 29 | 1ull << 57,
+ 1ull << 29 | 1ull << 57,
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_VENDOR_1 | AHARDWAREBUFFER_USAGE_VENDOR_13));
}
+
+TEST(AHardwareBufferTest, GetCreateHandleTest) {
+ AHardwareBuffer_Desc desc{
+ .width = 64,
+ .height = 1,
+ .layers = 1,
+ .format = AHARDWAREBUFFER_FORMAT_BLOB,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ .stride = 64,
+ };
+
+ AHardwareBuffer* buffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_allocate(&desc, &buffer));
+ const native_handle_t* handle = AHardwareBuffer_getNativeHandle(buffer);
+ EXPECT_NE(nullptr, handle);
+
+ AHardwareBuffer* otherBuffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_createFromHandle(
+ &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &otherBuffer));
+ EXPECT_NE(nullptr, otherBuffer);
+
+ AHardwareBuffer_release(buffer);
+ AHardwareBuffer_release(otherBuffer);
+}
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index 8c1991e..8863ec2 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -365,55 +365,87 @@
class FakeInputReceiver {
public:
- void consumeEvent(int32_t expectedEventType, int32_t expectedDisplayId,
- int32_t expectedFlags = 0) {
+ InputEvent* consume() {
uint32_t consumeSeq;
InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer consume should return OK.";
- ASSERT_TRUE(event != nullptr)
- << mName.c_str() << ": consumer should have returned non-NULL event.";
+ std::chrono::time_point start = std::chrono::steady_clock::now();
+ status_t status = WOULD_BLOCK;
+ while (status == WOULD_BLOCK) {
+ status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1, &consumeSeq,
+ &event);
+ std::chrono::duration elapsed = std::chrono::steady_clock::now() - start;
+ if (elapsed > 100ms) {
+ break;
+ }
+ }
+
+ if (status == WOULD_BLOCK) {
+ // Just means there's no event available.
+ return nullptr;
+ }
+
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer consume should return OK.";
+ return nullptr;
+ }
+ if (event == nullptr) {
+ ADD_FAILURE() << "Consumed correctly, but received NULL event from consumer";
+ return nullptr;
+ }
+
+ status = mConsumer->sendFinishedSignal(consumeSeq, handled());
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ }
+ return event;
+ }
+
+ void consumeEvent(int32_t expectedEventType, int32_t expectedAction, int32_t expectedDisplayId,
+ int32_t expectedFlags) {
+ InputEvent* event = consume();
+
+ ASSERT_NE(nullptr, event) << mName.c_str()
+ << ": consumer should have returned non-NULL event.";
ASSERT_EQ(expectedEventType, event->getType())
<< mName.c_str() << ": event type should match.";
- ASSERT_EQ(expectedDisplayId, event->getDisplayId())
- << mName.c_str() << ": event displayId should be the same as expected.";
+ EXPECT_EQ(expectedDisplayId, event->getDisplayId());
- int32_t flags;
switch (expectedEventType) {
case AINPUT_EVENT_TYPE_KEY: {
- KeyEvent* typedEvent = static_cast<KeyEvent*>(event);
- flags = typedEvent->getFlags();
+ const KeyEvent& keyEvent = static_cast<const KeyEvent&>(*event);
+ EXPECT_EQ(expectedAction, keyEvent.getAction());
+ EXPECT_EQ(expectedFlags, keyEvent.getFlags());
break;
}
case AINPUT_EVENT_TYPE_MOTION: {
- MotionEvent* typedEvent = static_cast<MotionEvent*>(event);
- flags = typedEvent->getFlags();
+ const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*event);
+ EXPECT_EQ(expectedAction, motionEvent.getAction());
+ EXPECT_EQ(expectedFlags, motionEvent.getFlags());
break;
}
default: {
FAIL() << mName.c_str() << ": invalid event type: " << expectedEventType;
}
}
- ASSERT_EQ(expectedFlags, flags)
- << mName.c_str() << ": event flags should be the same as expected.";
+ }
- status = mConsumer->sendFinishedSignal(consumeSeq, handled());
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ void consumeKeyDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
+ }
+
+ void consumeMotionDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_MOTION, AMOTION_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
}
void assertNoEvents() {
- uint32_t consumeSeq;
- InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_NE(OK, status)
+ InputEvent* event = consume();
+ ASSERT_EQ(nullptr, event)
<< mName.c_str()
- << ": should not have received any events, so consume(..) should not return OK.";
+ << ": should not have received any events, so consume() should return NULL";
}
protected:
@@ -611,7 +643,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Window should receive motion event.
- window->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ window->consumeMotionDown(ADISPLAY_ID_DEFAULT);
}
// The foreground window should receive the first touch down event.
@@ -632,7 +664,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top window should receive the touch down event. Second window should not receive anything.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowTop->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowSecond->assertNoEvents();
}
@@ -658,7 +690,7 @@
// Focused window should receive event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, SetInputWindow_FocusPriority) {
@@ -683,7 +715,7 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top focused window should receive event.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowTop->consumeKeyDown(ADISPLAY_ID_NONE);
windowSecond->assertNoEvents();
}
@@ -713,7 +745,7 @@
// Top window is invalid, so it should not receive any input event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, DispatchMouseEventsUnderCursor) {
@@ -738,7 +770,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED,
injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_MOUSE,
ADISPLAY_ID_DEFAULT, 610, 400, 599, 400));
- windowLeft->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowLeft->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowRight->assertNoEvents();
}
@@ -794,7 +826,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test touch down on second display.
@@ -802,29 +834,29 @@
AINPUT_SOURCE_TOUCHSCREEN, SECOND_DISPLAY_ID))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
}
TEST_F(InputDispatcherFocusOnTwoDisplaysTest, SetInputWindow_MultiDisplayFocus) {
// Test inject a key down with display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher, ADISPLAY_ID_DEFAULT))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeKeyDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test inject a key down without display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
// Remove secondary display.
std::vector<sp<InputWindowHandle>> noWindows;
mDispatcher->setInputWindows(noWindows, SECOND_DISPLAY_ID);
// Expect old focus should receive a cancel event.
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE,
- AKEY_EVENT_FLAG_CANCELED);
+ windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_UP, ADISPLAY_ID_NONE,
+ AKEY_EVENT_FLAG_CANCELED);
// Test inject a key down, should timeout because of no target window.
ASSERT_EQ(INPUT_EVENT_INJECTION_TIMED_OUT, injectKeyDown(mDispatcher))
@@ -853,8 +885,8 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
- monitorInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
+ monitorInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
monitorInSecondary->assertNoEvents();
@@ -864,8 +896,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
+ monitorInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
// Test inject a non-pointer motion event.
// If specific a display, it will dispatch to the focused window of particular display,
@@ -875,8 +907,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
}
// Test per-display input monitors for key event.
@@ -892,8 +924,8 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
}
class InputFilterTest : public InputDispatcherTest {
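Note: the reworked consume() above replaces a single non-blocking consume call with a bounded polling loop: retry while the consumer reports WOULD_BLOCK and give up after roughly 100ms of steady_clock time, treating the timeout as "no event available". A self-contained sketch of that deadline-polling shape (pollOnce stands in for the real consumer call):

    #include <chrono>
    #include <cstdio>
    #include <optional>

    using namespace std::chrono_literals;

    enum class PollStatus { kOk, kWouldBlock };

    // Stand-in for a single non-blocking consume attempt; here nothing is ever
    // available, which exercises the timeout path.
    static PollStatus pollOnce(int& outValue) {
        (void)outValue;
        return PollStatus::kWouldBlock;
    }

    static std::optional<int> consumeWithTimeout(std::chrono::milliseconds timeout) {
        const auto start = std::chrono::steady_clock::now();
        int value = 0;
        PollStatus status = PollStatus::kWouldBlock;
        while (status == PollStatus::kWouldBlock) {
            status = pollOnce(value);
            if (std::chrono::steady_clock::now() - start > timeout) {
                break;  // deadline reached; report "no event available"
            }
        }
        return status == PollStatus::kOk ? std::optional<int>(value) : std::nullopt;
    }

    int main() {
        const auto event = consumeWithTimeout(100ms);
        std::printf("event available: %s\n", event.has_value() ? "yes" : "no");
        return 0;
    }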
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index f529a44..4b71bd8 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -165,7 +165,9 @@
"Scheduler/PhaseOffsets.cpp",
"Scheduler/Scheduler.cpp",
"Scheduler/SchedulerUtils.cpp",
+ "Scheduler/Timer.cpp",
"Scheduler/VSyncDispatchTimerQueue.cpp",
+ "Scheduler/VSyncPredictor.cpp",
"Scheduler/VSyncModulator.cpp",
"StartPropertySetThread.cpp",
"SurfaceFlinger.cpp",
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index 3146256..94c4a81 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -286,7 +286,7 @@
return hasReadyFrame();
}
-bool BufferLayer::onPostComposition(const std::optional<DisplayId>& displayId,
+bool BufferLayer::onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) {
@@ -308,6 +308,14 @@
const int32_t layerId = getSequence();
mFlinger->mTimeStats->setDesiredTime(layerId, mCurrentFrameNumber, desiredPresentTime);
+ const auto outputLayer = findOutputLayerForDisplay(displayDevice);
+ if (outputLayer && outputLayer->requiresClientComposition()) {
+ nsecs_t clientCompositionTimestamp = outputLayer->getState().clientCompositionTimestamp;
+ mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
+ clientCompositionTimestamp,
+ FrameTracer::FrameEvent::FALLBACK_COMPOSITION);
+ }
+
std::shared_ptr<FenceTime> frameReadyFence = mBufferInfo.mFenceTime;
if (frameReadyFence->isValid()) {
mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
@@ -317,6 +325,7 @@
mFrameTracker.setFrameReadyTime(desiredPresentTime);
}
+ const auto displayId = displayDevice->getId();
if (presentFence->isValid()) {
mFlinger->mTimeStats->setPresentFence(layerId, mCurrentFrameNumber, presentFence);
mFlinger->mFrameTracer->traceFence(layerId, getCurrentBufferId(), mCurrentFrameNumber,
diff --git a/services/surfaceflinger/BufferLayer.h b/services/surfaceflinger/BufferLayer.h
index 656ba12..16855d2 100644
--- a/services/surfaceflinger/BufferLayer.h
+++ b/services/surfaceflinger/BufferLayer.h
@@ -78,7 +78,7 @@
bool isHdrY410() const override;
- bool onPostComposition(const std::optional<DisplayId>& displayId,
+ bool onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) override;
diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp
index 9580ad5..d51d34b 100644
--- a/services/surfaceflinger/BufferQueueLayer.cpp
+++ b/services/surfaceflinger/BufferQueueLayer.cpp
@@ -307,8 +307,6 @@
}
uint64_t bufferID = mQueueItems[0].mGraphicBuffer->getId();
- mFlinger->mTimeStats->setAcquireFence(layerId, currentFrameNumber,
- mQueueItems[0].mFenceTime);
mFlinger->mFrameTracer->traceFence(layerId, bufferID, currentFrameNumber,
mQueueItems[0].mFenceTime,
FrameTracer::FrameEvent::ACQUIRE_FENCE);
diff --git a/services/surfaceflinger/ClientCache.cpp b/services/surfaceflinger/ClientCache.cpp
index 16fe27c..a5be01c 100644
--- a/services/surfaceflinger/ClientCache.cpp
+++ b/services/surfaceflinger/ClientCache.cpp
@@ -42,7 +42,7 @@
return false;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
auto bufItr = processBuffers.find(id);
if (bufItr == processBuffers.end()) {
@@ -86,12 +86,14 @@
return false;
}
auto [itr, success] =
- mBuffers.emplace(processToken, std::unordered_map<uint64_t, ClientCacheBuffer>());
+ mBuffers.emplace(processToken,
+ std::make_pair(token,
+ std::unordered_map<uint64_t, ClientCacheBuffer>()));
LOG_ALWAYS_FATAL_IF(!success, "failed to insert new process into client cache");
it = itr;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
if (processBuffers.size() > BUFFER_CACHE_MAX_SIZE) {
ALOGE("failed to cache buffer: cache is full");
@@ -120,7 +122,7 @@
}
}
- mBuffers[processToken].erase(id);
+ mBuffers[processToken].second.erase(id);
}
for (auto& recipient : pendingErase) {
@@ -180,7 +182,7 @@
return;
}
- for (auto& [id, clientCacheBuffer] : itr->second) {
+ for (auto& [id, clientCacheBuffer] : itr->second.second) {
client_cache_t cacheId = {processToken, id};
for (auto& recipient : clientCacheBuffer.recipients) {
sp<ErasedRecipient> erasedRecipient = recipient.promote();
diff --git a/services/surfaceflinger/ClientCache.h b/services/surfaceflinger/ClientCache.h
index aa6c80d..d7af7c0 100644
--- a/services/surfaceflinger/ClientCache.h
+++ b/services/surfaceflinger/ClientCache.h
@@ -61,7 +61,8 @@
std::set<wp<ErasedRecipient>> recipients;
};
std::map<wp<IBinder> /*caching process*/,
- std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>
+ std::pair<sp<IBinder> /*strong ref to caching process*/,
+ std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>>
mBuffers GUARDED_BY(mMutex);
class CacheDeathRecipient : public IBinder::DeathRecipient {
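Note: the ClientCache change above keeps an sp<IBinder> alongside each per-process buffer map while still keying the outer map by wp<IBinder>, so the cache itself pins the client token's lifetime instead of relying on promoting a weak reference. A rough analogue using the standard library's shared/weak pointers in place of Binder's sp/wp:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Token {};

    // Weak key for lookup identity, plus a strong value to pin the token's
    // lifetime for as long as its cached entries exist.
    using Cache = std::map<std::weak_ptr<Token>,
                           std::pair<std::shared_ptr<Token>,
                                     std::unordered_map<uint64_t, std::string>>,
                           std::owner_less<std::weak_ptr<Token>>>;

    int main() {
        Cache cache;
        auto token = std::make_shared<Token>();
        cache.emplace(token, std::make_pair(token, std::unordered_map<uint64_t, std::string>{}));

        token.reset();                          // the caller drops its reference...
        auto& entry = cache.begin()->second;
        return entry.first != nullptr ? 0 : 1;  // ...but the cache still holds one
    }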
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
index 1347449..11cfccc 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
@@ -98,6 +98,9 @@
// Debugging
void dump(std::string& result) const;
+
+ // Timestamp for when the layer is queued for client composition
+ nsecs_t clientCompositionTimestamp;
};
} // namespace compositionengine::impl
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index 1953005..6877f8b 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -904,6 +904,7 @@
layerSettings.disableBlending = true;
}
+ layer->editState().clientCompositionTimestamp = systemTime();
clientCompositionLayers.push_back(*result);
}
}
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index 1562416..242ccd5 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -511,6 +511,31 @@
}
/*
+ * Output::setReleasedLayers()
+ */
+
+using OutputSetReleasedLayersTest = OutputTest;
+
+TEST_F(OutputSetReleasedLayersTest, setReleasedLayersTakesGivenLayers) {
+ sp<StrictMock<mock::LayerFE>> layer1FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer2FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer3FE{new StrictMock<mock::LayerFE>()};
+
+ Output::ReleasedLayers layers;
+ layers.push_back(layer1FE);
+ layers.push_back(layer2FE);
+ layers.push_back(layer3FE);
+
+ mOutput->setReleasedLayers(std::move(layers));
+
+ const auto& setLayers = mOutput->getReleasedLayersForTest();
+ ASSERT_EQ(3u, setLayers.size());
+ ASSERT_EQ(layer1FE.get(), setLayers[0].promote().get());
+ ASSERT_EQ(layer2FE.get(), setLayers[1].promote().get());
+ ASSERT_EQ(layer3FE.get(), setLayers[2].promote().get());
+}
+
+/*
* Output::updateAndWriteCompositionState()
*/
@@ -951,6 +976,7 @@
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
EXPECT_CALL(leftLayerFE, prepareClientComposition(_)).WillOnce(Return(leftLayerRESettings));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -959,6 +985,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
@@ -1011,6 +1038,7 @@
EXPECT_CALL(outputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(layer, getFEState()).WillRepeatedly(ReturnRef(layerFEState));
EXPECT_CALL(layerFE, prepareClientComposition(_)).Times(0);
+ EXPECT_CALL(outputLayer, editState()).WillRepeatedly(ReturnRef(outputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u)).WillRepeatedly(Return(&outputLayer));
@@ -1076,6 +1104,7 @@
EXPECT_CALL(leftOutputLayer, requiresClientComposition()).WillRepeatedly(Return(false));
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -1084,6 +1113,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 5a19d8a..ce9aab5 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1370,6 +1370,8 @@
if (newTimestamps) {
mFlinger->mTimeStats->setPostTime(getSequence(), newTimestamps->frameNumber,
getName().c_str(), newTimestamps->postedTime);
+ mFlinger->mTimeStats->setAcquireFence(getSequence(), newTimestamps->frameNumber,
+ newTimestamps->acquireFence);
}
Mutex::Autolock lock(mFrameEventHistoryMutex);
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 1388612..286311b 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -531,7 +531,7 @@
* called after composition.
* returns true if the layer latched a new buffer this frame.
*/
- virtual bool onPostComposition(const std::optional<DisplayId>& /*displayId*/,
+ virtual bool onPostComposition(sp<const DisplayDevice> /*displayDevice*/,
const std::shared_ptr<FenceTime>& /*glDoneFence*/,
const std::shared_ptr<FenceTime>& /*presentFence*/,
const CompositorTiming& /*compositorTiming*/) {
@@ -601,6 +601,8 @@
virtual sp<GraphicBuffer> getBuffer() const { return nullptr; }
+ virtual uint64_t getCurrentFrameNumber() const { return mCurrentFrameNumber; }
+
/*
* Returns if a frame is ready
*/
diff --git a/services/surfaceflinger/Scheduler/Timer.cpp b/services/surfaceflinger/Scheduler/Timer.cpp
new file mode 100644
index 0000000..2394ed2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "SchedulerTimer"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/timerfd.h>
+#include <sys/unistd.h>
+#include <utils/Trace.h>
+#include <chrono>
+#include <cstdint>
+
+#include "Timer.h"
+
+namespace android::scheduler {
+
+static constexpr size_t kReadPipe = 0;
+static constexpr size_t kWritePipe = 1;
+
+template <class T, size_t N>
+constexpr size_t arrayLen(T (&)[N]) {
+ return N;
+}
+
+Timer::Timer()
+ : mTimerFd(timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK)),
+ mEpollFd(epoll_create1(EPOLL_CLOEXEC)) {
+ if (pipe2(mPipes.data(), O_CLOEXEC | O_NONBLOCK)) {
+ ALOGE("could not create TimerDispatch mPipes");
+ };
+
+ mDispatchThread = std::thread(std::bind(&Timer::dispatch, this));
+}
+
+Timer::~Timer() {
+ endDispatch();
+ mDispatchThread.join();
+
+ close(mPipes[kWritePipe]);
+ close(mPipes[kReadPipe]);
+ close(mEpollFd);
+ close(mTimerFd);
+}
+
+void Timer::endDispatch() {
+ static constexpr unsigned char end = 'e';
+ write(mPipes[kWritePipe], &end, sizeof(end));
+}
+
+nsecs_t Timer::now() const {
+ return systemTime(SYSTEM_TIME_MONOTONIC);
+}
+
+constexpr char const* timerTraceTag = "AlarmInNs";
+void Timer::alarmIn(std::function<void()> const& cb, nsecs_t fireIn) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, fireIn);
+
+ using namespace std::literals;
+ static constexpr int ns_per_s =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(1s).count();
+
+ mCallback = cb;
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {.tv_sec = static_cast<long>(fireIn / ns_per_s),
+ .tv_nsec = static_cast<long>(fireIn % ns_per_s)},
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to set timerfd");
+ }
+}
+
+void Timer::alarmCancel() {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, 0);
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ },
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to disarm timerfd");
+ }
+}
+
+void Timer::dispatch() {
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
+ ALOGW("Failed to set SCHED_FIFO on dispatch thread");
+ }
+
+ if (pthread_setname_np(pthread_self(), "TimerDispatch")) {
+ ALOGW("Failed to set thread name on dispatch thread");
+ }
+
+ enum DispatchType : uint32_t { TIMER, TERMINATE, MAX_DISPATCH_TYPE };
+ epoll_event timerEvent;
+ timerEvent.events = EPOLLIN;
+ timerEvent.data.u32 = DispatchType::TIMER;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mTimerFd, &timerEvent) == -1) {
+ ALOGE("Error adding timer fd to epoll dispatch loop");
+ return;
+ }
+
+ epoll_event terminateEvent;
+ terminateEvent.events = EPOLLIN;
+ terminateEvent.data.u32 = DispatchType::TERMINATE;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mPipes[kReadPipe], &terminateEvent) == -1) {
+ ALOGE("Error adding control fd to dispatch loop");
+ return;
+ }
+
+ uint64_t iteration = 0;
+ char const traceNamePrefix[] = "TimerIteration #";
+ static constexpr size_t max64print = std::numeric_limits<decltype(iteration)>::digits10;
+ static constexpr size_t maxlen = arrayLen(traceNamePrefix) + max64print;
+ std::array<char, maxlen> str_buffer;
+ auto timing = true;
+ while (timing) {
+ epoll_event events[DispatchType::MAX_DISPATCH_TYPE];
+ int nfds = epoll_wait(mEpollFd, events, DispatchType::MAX_DISPATCH_TYPE, -1);
+
+ if (ATRACE_ENABLED()) {
+ snprintf(str_buffer.data(), str_buffer.size(), "%s%" PRIu64, traceNamePrefix,
+ iteration++);
+ ATRACE_NAME(str_buffer.data());
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ timing = false;
+ continue;
+ }
+ }
+
+ for (auto i = 0; i < nfds; i++) {
+ if (events[i].data.u32 == DispatchType::TIMER) {
+ static uint64_t mIgnored = 0;
+ read(mTimerFd, &mIgnored, sizeof(mIgnored));
+ std::function<void()> cb;
+ {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ cb = mCallback;
+ }
+ if (cb) {
+ cb();
+ }
+ }
+ if (events[i].data.u32 == DispatchType::TERMINATE) {
+ timing = false;
+ }
+ }
+ }
+}
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/Timer.h b/services/surfaceflinger/Scheduler/Timer.h
new file mode 100644
index 0000000..0ae82c8
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "TimeKeeper.h"
+
+#include <android-base/thread_annotations.h>
+#include <array>
+#include <thread>
+
+namespace android::scheduler {
+
+class Timer : public TimeKeeper {
+public:
+ Timer();
+ ~Timer();
+ nsecs_t now() const final;
+
+ // NB: alarmIn and alarmCancel are threadsafe; whichever call returns last takes effect.
+ // Most users will want to serialize these calls so as to be aware of the timer state.
+ void alarmIn(std::function<void()> const& cb, nsecs_t fireIn) final;
+ void alarmCancel() final;
+
+private:
+ int const mTimerFd;
+ int const mEpollFd;
+ std::array<int, 2> mPipes;
+
+ std::thread mDispatchThread;
+ void dispatch();
+ void endDispatch();
+
+ std::mutex mMutex;
+ std::function<void()> mCallback GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
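Note: Timer.cpp above combines a timerfd armed via timerfd_settime(), a pipe used only to wake the dispatch loop for shutdown, and an epoll instance that waits on both. A trimmed-down, single-shot sketch of the same timerfd-plus-epoll mechanism (no pipe, no locking, no callback plumbing):

    #include <cstdint>
    #include <cstdio>
    #include <sys/epoll.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main() {
        int timerFd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
        int epollFd = epoll_create1(EPOLL_CLOEXEC);

        epoll_event ev{};
        ev.events = EPOLLIN;
        ev.data.fd = timerFd;
        epoll_ctl(epollFd, EPOLL_CTL_ADD, timerFd, &ev);

        // Arm a one-shot timer 50ms from now (it_interval == 0 means no repeat).
        itimerspec spec{};
        spec.it_value.tv_nsec = 50 * 1000 * 1000;
        timerfd_settime(timerFd, 0, &spec, nullptr);

        epoll_event events[1];
        int nfds = epoll_wait(epollFd, events, 1, -1);  // blocks until the timer fires
        if (nfds == 1 && events[0].data.fd == timerFd) {
            uint64_t expirations = 0;
            read(timerFd, &expirations, sizeof(expirations));  // drain the timerfd
            printf("timer fired %llu time(s)\n", (unsigned long long)expirations);
        }

        close(epollFd);
        close(timerFd);
        return 0;
    }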
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
new file mode 100644
index 0000000..643c5d2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+//#define LOG_NDEBUG 0
+#include "VSyncPredictor.h"
+#include <android-base/logging.h>
+#include <cutils/compiler.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <algorithm>
+#include <chrono>
+#include "SchedulerUtils.h"
+
+namespace android::scheduler {
+static auto constexpr kNeedsSamplesTag = "SamplesRequested";
+static auto constexpr kMaxPercent = 100u;
+
+VSyncPredictor::~VSyncPredictor() = default;
+
+VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
+ size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
+ : kHistorySize(historySize),
+ kMinimumSamplesForPrediction(minimumSamplesForPrediction),
+ kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
+ mIdealPeriod(idealPeriod) {
+ mRateMap[mIdealPeriod] = {idealPeriod, 0};
+}
+
+inline size_t VSyncPredictor::next(int i) const {
+ return (i + 1) % timestamps.size();
+}
+
+bool VSyncPredictor::validate(nsecs_t timestamp) const {
+ if (lastTimestampIndex < 0 || timestamps.empty()) {
+ return true;
+ }
+
+ auto const aValidTimestamp = timestamps[lastTimestampIndex];
+ auto const percent = (timestamp - aValidTimestamp) % mIdealPeriod * kMaxPercent / mIdealPeriod;
+ return percent < kOutlierTolerancePercent || percent > (kMaxPercent - kOutlierTolerancePercent);
+}
+
+void VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ if (!validate(timestamp)) {
+ ALOGW("timestamp was too far off the last known timestamp");
+ return;
+ }
+
+ if (timestamps.size() != kHistorySize) {
+ timestamps.push_back(timestamp);
+ lastTimestampIndex = next(lastTimestampIndex);
+ } else {
+ lastTimestampIndex = next(lastTimestampIndex);
+ timestamps[lastTimestampIndex] = timestamp;
+ }
+
+ if (timestamps.size() < kMinimumSamplesForPrediction) {
+ mRateMap[mIdealPeriod] = {mIdealPeriod, 0};
+ return;
+ }
+
+ // This is a 'simple linear regression' calculation of Y over X, with Y being the
+ // vsync timestamps, and X being the ordinal of vsync count.
+ // The calculated slope is the vsync period.
+ // Formula for reference:
+ // Sigma_i: means sum over all timestamps.
+ // mean(variable): statistical mean of variable.
+ // X: snapped ordinal of the timestamp
+ // Y: vsync timestamp
+ //
+ // Sigma_i( (X_i - mean(X)) * (Y_i - mean(Y) )
+ // slope = -------------------------------------------
+ // Sigma_i ( X_i - mean(X) ) ^ 2
+ //
+ // intercept = mean(Y) - slope * mean(X)
+ //
+ std::vector<nsecs_t> vsyncTS(timestamps.size());
+ std::vector<nsecs_t> ordinals(timestamps.size());
+
+ // normalizing to the oldest timestamp cuts down on error in calculating the intercept.
+ auto const oldest_ts = *std::min_element(timestamps.begin(), timestamps.end());
+ auto it = mRateMap.find(mIdealPeriod);
+ auto const currentPeriod = std::get<0>(it->second);
+ // TODO (b/144707443): it's important that there's some precision in the mean of the ordinals
+ // for the intercept calculation, so scale the ordinals by 10 to continue
+ // fixed point calculation. Explore expanding
+ // scheduler::utils::calculate_mean to have a fixed point fractional part.
+ static constexpr int kScalingFactor = 10;
+
+ for (auto i = 0u; i < timestamps.size(); i++) {
+ vsyncTS[i] = timestamps[i] - oldest_ts;
+ ordinals[i] = ((vsyncTS[i] + (currentPeriod / 2)) / currentPeriod) * kScalingFactor;
+ }
+
+ auto meanTS = scheduler::calculate_mean(vsyncTS);
+ auto meanOrdinal = scheduler::calculate_mean(ordinals);
+ for (auto i = 0; i < vsyncTS.size(); i++) {
+ vsyncTS[i] -= meanTS;
+ ordinals[i] -= meanOrdinal;
+ }
+
+ auto top = 0ll;
+ auto bottom = 0ll;
+ for (auto i = 0; i < vsyncTS.size(); i++) {
+ top += vsyncTS[i] * ordinals[i];
+ bottom += ordinals[i] * ordinals[i];
+ }
+
+ if (CC_UNLIKELY(bottom == 0)) {
+ it->second = {mIdealPeriod, 0};
+ return;
+ }
+
+ nsecs_t const anticipatedPeriod = top / bottom * kScalingFactor;
+ nsecs_t const intercept = meanTS - (anticipatedPeriod * meanOrdinal / kScalingFactor);
+
+ it->second = {anticipatedPeriod, intercept};
+
+ ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
+ anticipatedPeriod, intercept);
+}
+
+nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ auto const [slope, intercept] = getVSyncPredictionModel(lk);
+
+ if (timestamps.empty()) {
+ auto const knownTimestamp = mKnownTimestamp ? *mKnownTimestamp : timePoint;
+ auto const numPeriodsOut = ((timePoint - knownTimestamp) / mIdealPeriod) + 1;
+ return knownTimestamp + numPeriodsOut * mIdealPeriod;
+ }
+
+ auto const oldest = *std::min_element(timestamps.begin(), timestamps.end());
+ auto const ordinalRequest = (timePoint - oldest + slope) / slope;
+ auto const prediction = (ordinalRequest * slope) + intercept + oldest;
+
+ ALOGV("prediction made from: %" PRId64 " prediction: %" PRId64 " (+%" PRId64 ") slope: %" PRId64
+ " intercept: %" PRId64,
+ timePoint, prediction, prediction - timePoint, slope, intercept);
+ return prediction;
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel() const {
+ std::lock_guard<std::mutex> lk(mMutex);
+ return VSyncPredictor::getVSyncPredictionModel(lk);
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel(
+ std::lock_guard<std::mutex> const&) const {
+ return mRateMap.find(mIdealPeriod)->second;
+}
+
+void VSyncPredictor::setPeriod(nsecs_t period) {
+ ATRACE_CALL();
+
+ std::lock_guard<std::mutex> lk(mMutex);
+ static constexpr size_t kSizeLimit = 30;
+ if (CC_UNLIKELY(mRateMap.size() == kSizeLimit)) {
+ mRateMap.erase(mRateMap.begin());
+ }
+
+ mIdealPeriod = period;
+ if (mRateMap.find(period) == mRateMap.end()) {
+ mRateMap[mIdealPeriod] = {period, 0};
+ }
+
+ if (!timestamps.empty()) {
+ mKnownTimestamp = *std::max_element(timestamps.begin(), timestamps.end());
+ timestamps.clear();
+ lastTimestampIndex = 0;
+ }
+}
+
+bool VSyncPredictor::needsMoreSamples(nsecs_t now) const {
+ using namespace std::literals::chrono_literals;
+ std::lock_guard<std::mutex> lk(mMutex);
+ bool needsMoreSamples = true;
+ if (timestamps.size() >= kMinimumSamplesForPrediction) {
+ nsecs_t constexpr aLongTime =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(500ms).count();
+ if (!(lastTimestampIndex < 0 || timestamps.empty())) {
+ auto const lastTimestamp = timestamps[lastTimestampIndex];
+ needsMoreSamples = !((lastTimestamp + aLongTime) > now);
+ }
+ }
+
+ ATRACE_INT(kNeedsSamplesTag, needsMoreSamples);
+ return needsMoreSamples;
+}
+
+} // namespace android::scheduler
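Note: the model update in addVsyncTimestamp() above is an ordinary least-squares fit: each timestamp is paired with its vsync ordinal (periods elapsed since the oldest sample), the fitted slope is the estimated period, and the intercept is the phase offset relative to the oldest sample. A small standalone version of that slope/intercept computation in floating point (the diff's fixed-point scaling is omitted):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Least-squares fit of: timestamp = slope * ordinal + intercept.
    static void fitPeriod(const std::vector<int64_t>& timestamps, int64_t roughPeriod,
                          double& slope, double& intercept) {
        const int64_t oldest = timestamps.front();
        std::vector<double> xs, ys;
        double meanX = 0, meanY = 0;
        for (int64_t ts : timestamps) {
            double y = static_cast<double>(ts - oldest);
            double x = static_cast<double>((ts - oldest + roughPeriod / 2) / roughPeriod);  // snap to ordinal
            xs.push_back(x);
            ys.push_back(y);
            meanX += x;
            meanY += y;
        }
        meanX /= xs.size();
        meanY /= ys.size();

        double top = 0, bottom = 0;
        for (size_t i = 0; i < xs.size(); ++i) {
            top += (xs[i] - meanX) * (ys[i] - meanY);
            bottom += (xs[i] - meanX) * (xs[i] - meanX);
        }
        slope = bottom != 0 ? top / bottom : static_cast<double>(roughPeriod);
        intercept = meanY - slope * meanX;
    }

    int main() {
        // Timestamps roughly 16.7ms apart, with a little jitter (nanoseconds).
        std::vector<int64_t> ts = {0, 16700000, 33300000, 50100000, 66600000};
        double slope = 0, intercept = 0;
        fitPeriod(ts, 16666666, slope, intercept);
        printf("period ~= %.0f ns, phase ~= %.0f ns\n", slope, intercept);
        return 0;
    }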
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
new file mode 100644
index 0000000..1590f49
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+#include "VSyncTracker.h"
+
+namespace android::scheduler {
+
+class VSyncPredictor : public VSyncTracker {
+public:
+ /*
+ * \param [in] idealPeriod The initial ideal period to use.
+ * \param [in] historySize The internal amount of entries to store in the model.
+ * \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
+ * predicting. \param [in] outlierTolerancePercent a number 0 to 100 that will be used to filter
+ * samples that fall outlierTolerancePercent from an anticipated vsync event.
+ */
+ VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
+ uint32_t outlierTolerancePercent);
+ ~VSyncPredictor();
+
+ void addVsyncTimestamp(nsecs_t timestamp) final;
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final;
+
+ /*
+ * Informs the model that the period is anticipated to change to a new value.
+ * The model will use the period parameter to predict vsync events until enough
+ * timestamps with the new period have been collected.
+ *
+ * \param [in] period The new period that should be used.
+ */
+ void setPeriod(nsecs_t period);
+
+ /* Queries whether the model needs more samples to make a prediction at timePoint.
+ * \param [in] timePoint The time point to inquire about.
+ * \return True if the model would benefit from more samples, false otherwise.
+ */
+ bool needsMoreSamples(nsecs_t timePoint) const;
+
+ std::tuple<nsecs_t /* slope */, nsecs_t /* intercept */> getVSyncPredictionModel() const;
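+
+ /* Usage sketch, assuming a caller that feeds present-fence timestamps. The period values
+ * (~60Hz and ~90Hz), the history size, and the presentFenceTimes container below are
+ * illustrative only:
+ *
+ *   VSyncPredictor predictor(16'666'666, 20, 6, 25); // ~60Hz ideal period
+ *   predictor.setPeriod(11'111'111);                 // anticipate a switch to ~90Hz
+ *   for (nsecs_t t : presentFenceTimes) {
+ *       predictor.addVsyncTimestamp(t);
+ *       if (!predictor.needsMoreSamples(t)) break;
+ *   }
+ *   nsecs_t next = predictor.nextAnticipatedVSyncTimeFrom(systemTime());
+ */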
+
+private:
+ VSyncPredictor(VSyncPredictor const&) = delete;
+ VSyncPredictor& operator=(VSyncPredictor const&) = delete;
+
+ size_t const kHistorySize;
+ size_t const kMinimumSamplesForPrediction;
+ size_t const kOutlierTolerancePercent;
+
+ std::mutex mutable mMutex;
+ size_t next(int i) const REQUIRES(mMutex);
+ bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
+ std::tuple<nsecs_t, nsecs_t> getVSyncPredictionModel(std::lock_guard<std::mutex> const&) const
+ REQUIRES(mMutex);
+
+ nsecs_t mIdealPeriod GUARDED_BY(mMutex);
+ std::optional<nsecs_t> mKnownTimestamp GUARDED_BY(mMutex);
+
+ std::unordered_map<nsecs_t, std::tuple<nsecs_t, nsecs_t>> mutable mRateMap GUARDED_BY(mMutex);
+
+ int lastTimestampIndex GUARDED_BY(mMutex) = 0;
+ std::vector<nsecs_t> timestamps GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index ec15bad..14a2ab1 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1715,7 +1715,12 @@
refreshArgs.layersWithQueuedFrames.reserve(mLayersWithQueuedFrames.size());
for (sp<Layer> layer : mLayersWithQueuedFrames) {
auto compositionLayer = layer->getCompositionLayer();
- if (compositionLayer) refreshArgs.layersWithQueuedFrames.push_back(compositionLayer.get());
+ if (compositionLayer) {
+ refreshArgs.layersWithQueuedFrames.push_back(compositionLayer.get());
+ mFrameTracer->traceTimestamp(layer->getSequence(), layer->getCurrentBufferId(),
+ layer->getCurrentFrameNumber(), systemTime(),
+ FrameTracer::FrameEvent::HWC_COMPOSITION_QUEUED);
+ }
}
refreshArgs.repaintEverything = mRepaintEverything.exchange(false);
@@ -1896,9 +1901,8 @@
}
mDrawingState.traverseInZOrder([&](Layer* layer) {
- bool frameLatched =
- layer->onPostComposition(displayDevice->getId(), glCompositionDoneFenceTime,
- presentFenceTime, compositorTiming);
+ bool frameLatched = layer->onPostComposition(displayDevice, glCompositionDoneFenceTime,
+ presentFenceTime, compositorTiming);
if (frameLatched) {
recordBufferingStats(layer->getName(), layer->getOccupancyHistory(false));
}
@@ -3714,6 +3718,8 @@
if (currentMode == HWC_POWER_MODE_OFF) {
// Turn on the display
+ // TODO: @vhau temp fix only! See b/141111965
+ mTransactionCompletedThread.clearAllPending();
getHwComposer().setPowerMode(*displayId, mode);
if (display->isPrimary() && mode != HWC_POWER_MODE_DOZE_SUSPEND) {
setVsyncEnabledInHWC(*displayId, mHWCVsyncPendingState);
diff --git a/services/surfaceflinger/TransactionCompletedThread.cpp b/services/surfaceflinger/TransactionCompletedThread.cpp
index 8db03db..c15355d 100644
--- a/services/surfaceflinger/TransactionCompletedThread.cpp
+++ b/services/surfaceflinger/TransactionCompletedThread.cpp
@@ -189,6 +189,15 @@
return NO_ERROR;
}
+void TransactionCompletedThread::clearAllPending() {
+ std::lock_guard lock(mMutex);
+ if (!mRunning) {
+ return;
+ }
+ mPendingTransactions.clear();
+ mConditionVariable.notify_all();
+}
+
status_t TransactionCompletedThread::registerUnpresentedCallbackHandle(
const sp<CallbackHandle>& handle) {
std::lock_guard lock(mMutex);
diff --git a/services/surfaceflinger/TransactionCompletedThread.h b/services/surfaceflinger/TransactionCompletedThread.h
index 12ea8fe..cd95bfb 100644
--- a/services/surfaceflinger/TransactionCompletedThread.h
+++ b/services/surfaceflinger/TransactionCompletedThread.h
@@ -70,6 +70,8 @@
// Notifies the TransactionCompletedThread that a pending CallbackHandle has been presented.
status_t finalizePendingCallbackHandles(const std::deque<sp<CallbackHandle>>& handles);
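+ // Clears all pending transactions and wakes the thread. Temporary workaround used when
+ // turning a display back on (see b/141111965).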
+ void clearAllPending();
+
// Adds the Transaction CallbackHandle from a layer that does not need to be relatched and
// presented this frame.
status_t registerUnpresentedCallbackHandle(const sp<CallbackHandle>& handle);
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index 78114a1..0c4a752 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -55,6 +55,8 @@
"TransactionApplicationTest.cpp",
"StrongTypingTest.cpp",
"VSyncDispatchTimerQueueTest.cpp",
+ "VSyncDispatchRealtimeTest.cpp",
+ "VSyncPredictorTest.cpp",
"mock/DisplayHardware/MockComposer.cpp",
"mock/DisplayHardware/MockDisplay.cpp",
"mock/DisplayHardware/MockPowerAdvisor.cpp",
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
new file mode 100644
index 0000000..c012616
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scheduler/TimeKeeper.h"
+#include "Scheduler/Timer.h"
+#include "Scheduler/VSyncDispatchTimerQueue.h"
+#include "Scheduler/VSyncTracker.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <thread>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+template <typename Rep, typename Per>
+constexpr nsecs_t toNs(std::chrono::duration<Rep, Per> const& tp) {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(tp).count();
+}
+
+class FixedRateIdealStubTracker : public VSyncTracker {
+public:
+ FixedRateIdealStubTracker() : mPeriod{toNs(3ms)} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
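+ // Returns timePoint if it lies exactly on a period boundary, otherwise rounds it up to
+ // the next multiple of the fixed 3ms period (an ideal, fixed-rate vsync source).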
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final {
+ auto const floor = timePoint % mPeriod;
+ if (floor == 0) {
+ return timePoint;
+ }
+ return timePoint - floor + mPeriod;
+ }
+
+private:
+ nsecs_t const mPeriod;
+};
+
+class VRRStubTracker : public VSyncTracker {
+public:
+ VRRStubTracker(nsecs_t period) : mPeriod{period} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
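+ // Same rounding as above, but on a vsync grid anchored at mBase with a mutable period,
+ // letting tests simulate a variable-rate (VRR) display.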
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t time_point) const final {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ auto const normalized_to_base = time_point - mBase;
+ auto const floor = (normalized_to_base) % mPeriod;
+ if (floor == 0) {
+ return time_point;
+ }
+ return normalized_to_base - floor + mPeriod + mBase;
+ }
+
+ void set_interval(nsecs_t interval, nsecs_t last_known) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mPeriod = interval;
+ mBase = last_known;
+ }
+
+private:
+ std::mutex mutable mMutex;
+ nsecs_t mPeriod;
+ nsecs_t mBase = 0;
+};
+
+struct VSyncDispatchRealtimeTest : testing::Test {
+ static nsecs_t constexpr mDispatchGroupThreshold = toNs(100us);
+ static size_t constexpr mIterations = 20;
+};
+
+class RepeatingCallbackReceiver {
+public:
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t wl)
+ : mWorkload(wl),
+ mCallback(
+ dispatch, [&](auto time) { callback_called(time); }, "repeat0") {}
+
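+ // Schedules the first callback mWorkload ns from now, then re-schedules off each delivered
+ // target time until `iterations` callbacks have fired, invoking onEachFrame between frames.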
+ void repeatedly_schedule(size_t iterations, std::function<void(nsecs_t)> const& onEachFrame) {
+ mCallbackTimes.reserve(iterations);
+ mCallback.schedule(mWorkload, systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload);
+
+ for (auto i = 0u; i < iterations - 1; i++) {
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ mCalled = false;
+ auto last = mLastTarget;
+ lk.unlock();
+
+ onEachFrame(last);
+
+ mCallback.schedule(mWorkload, last + mWorkload);
+ }
+
+ // wait for the last callback.
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ }
+
+ void with_callback_times(std::function<void(std::vector<nsecs_t> const&)> const& fn) const {
+ fn(mCallbackTimes);
+ }
+
+private:
+ void callback_called(nsecs_t time) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mCallbackTimes.push_back(time);
+ mCalled = true;
+ mLastTarget = time;
+ mCv.notify_all();
+ }
+
+ nsecs_t const mWorkload;
+ VSyncCallbackRegistration mCallback;
+
+ std::mutex mMutex;
+ std::condition_variable mCv;
+ bool mCalled = false;
+ nsecs_t mLastTarget = 0;
+ std::vector<nsecs_t> mCallbackTimes;
+};
+
+TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
+ FixedRateIdealStubTracker tracker;
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ static size_t constexpr num_clients = 3;
+ std::array<RepeatingCallbackReceiver, num_clients>
+ cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us)),
+ RepeatingCallbackReceiver(dispatch, toNs(0h)),
+ RepeatingCallbackReceiver(dispatch, toNs(1ms))};
+
+ auto const on_each_frame = [](nsecs_t) {};
+ std::array<std::thread, num_clients> threads{
+ std::thread([&] { cb_receiver[0].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[1].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[2].repeatedly_schedule(mIterations, on_each_frame); }),
+ };
+
+ for (auto it = threads.rbegin(); it != threads.rend(); it++) {
+ it->join();
+ }
+
+ for (auto const& cbs : cb_receiver) {
+ cbs.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+ }
+}
+
+// Starts at 333Hz and slides down to ~43Hz by adding 1ms to the vsync interval each frame.
+TEST_F(VSyncDispatchRealtimeTest, vacillating_vrr) {
+ auto next_vsync_interval = toNs(3ms);
+ VRRStubTracker tracker(next_vsync_interval);
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
+ };
+
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+
+// Starts at 333Hz, jumps to 200Hz at frame 10.
+TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
+ VRRStubTracker tracker(toNs(3ms));
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto jump_frame_counter = 0u;
+ auto constexpr jump_frame_at = 10u;
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ if (jump_frame_counter++ == jump_frame_at) {
+ tracker.set_interval(toNs(5ms), last_known);
+ }
+ };
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
new file mode 100644
index 0000000..d0c8090
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LibSurfaceFlingerUnittests"
+#define LOG_NDEBUG 0
+
+#include "Scheduler/VSyncPredictor.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <chrono>
+#include <utility>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+MATCHER_P2(IsCloseTo, value, tolerance, "is within tolerance") {
+ return arg <= value + tolerance && arg >= value - tolerance;
+}
+
+std::vector<nsecs_t> generateVsyncTimestamps(size_t count, nsecs_t period, nsecs_t bias) {
+ std::vector<nsecs_t> vsyncs(count);
+ std::generate(vsyncs.begin(), vsyncs.end(),
+ [&, n = 0]() mutable { return n++ * period + bias; });
+ return vsyncs;
+}
+
+struct VSyncPredictorTest : testing::Test {
+ nsecs_t mNow = 0;
+ nsecs_t mPeriod = 1000;
+ static constexpr size_t kHistorySize = 10;
+ static constexpr size_t kMinimumSamplesForPrediction = 6;
+ static constexpr size_t kOutlierTolerancePercent = 25;
+ static constexpr nsecs_t mMaxRoundingError = 100;
+
+ VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+ kOutlierTolerancePercent};
+};
+
+TEST_F(VSyncPredictorTest, reportsAnticipatedPeriod) {
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+
+ EXPECT_THAT(slope, Eq(mPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ auto const changedPeriod = 2000;
+ tracker.setPeriod(changedPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(changedPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededWhenHasNoDataPoints) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += mPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededAfterExplicitRateChange) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += mPeriod);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ auto const changedPeriod = mPeriod * 2;
+ tracker.setPeriod(changedPeriod);
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow));
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += changedPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, transitionsToModelledPointsAfterSynthetic) {
+ auto last = mNow;
+ auto const bias = 10;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(last + mPeriod));
+ mNow += mPeriod - bias;
+ last = mNow;
+ tracker.addVsyncTimestamp(mNow);
+ mNow += bias;
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 100), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 990), Eq(mNow + 2 * mPeriod - bias));
+}
+
+TEST_F(VSyncPredictorTest, uponNotifiedOfInaccuracyUsesSynthetic) {
+ auto const slightlyLessPeriod = mPeriod - 10;
+ auto const changedPeriod = mPeriod - 1;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += slightlyLessPeriod);
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + slightlyLessPeriod));
+ tracker.setPeriod(changedPeriod);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + changedPeriod));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_60hzHighVariance) {
+ // These are precomputed simulated 16.6ms vsyncs with a uniform +/- 1.6ms error distribution.
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 15492949, 32325658, 49534984, 67496129, 84652891,
+ 100332564, 117737004, 132125931, 149291099, 165199602,
+ };
+ auto constexpr idealPeriod = 16600000;
+ auto constexpr expectedPeriod = 16639242;
+ auto constexpr expectedIntercept = 1049341;
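+ // The expected slope/intercept here (and in the tests below) presumably correspond to an
+ // ordinary least-squares fit of the samples against their ordinal position, measured
+ // relative to the oldest timestamp:
+ //   slope     = (n * sum(x*y) - sum(x) * sum(y)) / (n * sum(x*x) - sum(x)^2)
+ //   intercept = (sum(y) - slope * sum(x)) / n
+ // where x is the sample index and y = timestamp - oldest.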
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_90hzLowVariance) {
+ // These are precomputed simulated 11.1ms vsyncs with a uniform +/- 1ms error distribution.
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 11167047, 22603464, 32538479, 44938134, 56321268,
+ 66730346, 78062637, 88171429, 99707843, 111397621,
+ };
+ auto idealPeriod = 11110000;
+ auto expectedPeriod = 11089413;
+ auto expectedIntercept = 94421;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelinesDiscontinuous_22hzLowVariance) {
+ // These are 45.45ms vsyncs with low variance (random error between -1 and 1ms); the trailing
+ // comments give each sample's vsync ordinal, with gaps to simulate a discontinuous timeline.
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 45259463, // 0
+ 91511026, // 1
+ 136307650, // 2
+ 1864501714, // 40
+ 1908641034, // 41
+ 1955278544, // 42
+ 4590180096, // 100
+ 4681594994, // 102
+ 5499224734, // 120
+ 5591378272, // 122
+ };
+ auto idealPeriod = 45454545;
+ auto expectedPeriod = 45450152;
+ auto expectedIntercept = 469647;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, againstOutliersDiscontinuous_500hzLowVariance) {
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 1992548, // 0
+ 4078038, // 1
+ 6165794, // 2
+ 7958171, // 3
+ 10193537, // 4
+ 2401840200, // 1200
+ 2403000000, // an outlier that should be excluded (1201 and a half)
+ 2405803629, // 1202
+ 2408028599, // 1203
+ 2410121051, // 1204
+ };
+ auto idealPeriod = 2000000;
+ auto expectedPeriod = 1999892;
+ auto expectedIntercept = 175409;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, handlesVsyncChange) {
+ auto const fastPeriod = 100;
+ auto const fastTimeBase = 100;
+ auto const slowPeriod = 400;
+ auto const slowTimeBase = 800;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+
+ tracker.setPeriod(fastPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(fastPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(slowPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, willBeAccurateUsingPriorResultsForRate) {
+ auto const fastPeriod = 101000;
+ auto const fastTimeBase = fastPeriod - 500;
+ auto const fastPeriod2 = 99000;
+
+ auto const slowPeriod = 400000;
+ auto const slowTimeBase = 800000 - 201;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+ auto const simulatedVsyncsFast2 =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod2, fastTimeBase);
+
+ auto idealPeriod = 100000;
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ // We already had a model for the 100us ideal period; use that until enough new samples
+ // have been collected.
+ tracker.setPeriod(idealPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ for (auto const& timestamp : simulatedVsyncsFast2) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod2));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, willBecomeInaccurateAfterA_longTimeWithNoSamples) {
+ auto const simulatedVsyncs = generateVsyncTimestamps(kMinimumSamplesForPrediction, mPeriod, 0);
+
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto const mNow = *simulatedVsyncs.rbegin();
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ // TODO: would be better to decay this as a result of the variance of the samples
+ static auto constexpr aLongTimeOut = 1000000000;
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow + aLongTimeOut));
+}
+
+TEST_F(VSyncPredictorTest, idealModelPredictionsBeforeRegressionModelIsBuilt) {
+ auto const simulatedVsyncs =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction + 1, mPeriod, 0);
+ nsecs_t const mNow = 0;
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mPeriod));
+
+ nsecs_t const aBitOfTime = 422;
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+
+ for (auto i = kMinimumSamplesForPrediction; i < simulatedVsyncs.size(); i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+}
+
+} // namespace android::scheduler