Merge "[Shadows] Add api to set global shadow settings (4/n)"
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 4026f29..e6e232c 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -92,10 +92,6 @@
static constexpr const char* CACHE_DIR_POSTFIX = "/cache";
static constexpr const char* CODE_CACHE_DIR_POSTFIX = "/code_cache";
-static constexpr const char *kIdMapPath = "/system/bin/idmap";
-static constexpr const char* IDMAP_PREFIX = "/data/resource-cache/";
-static constexpr const char* IDMAP_SUFFIX = "@idmap";
-
// fsverity assumes the page size is always 4096. If not, the feature can not be
// enabled.
static constexpr int kVerityPageSize = 4096;
@@ -2253,206 +2249,6 @@
return res;
}
-static void run_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--fd", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static void run_verify_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--verify", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static bool delete_stale_idmap(const char* target_apk, const char* overlay_apk,
- const char* idmap_path, int32_t uid) {
- int idmap_fd = open(idmap_path, O_RDWR);
- if (idmap_fd < 0) {
- PLOG(ERROR) << "idmap open failed: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- LOG(ERROR) << "setgid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (setuid(uid) != 0) {
- LOG(ERROR) << "setuid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- PLOG(ERROR) << "flock(" << idmap_path << ") failed during idmap";
- exit(1);
- }
-
- run_verify_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to deleting stale idmap failed */
- } else {
- int status = wait_child(pid);
- close(idmap_fd);
-
- if (status != 0) {
- // Failed on verifying if idmap is made from target_apk and overlay_apk.
- LOG(DEBUG) << "delete stale idmap: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
- }
- return false;
-}
-
-// Transform string /a/b/c.apk to (prefix)/a@b@c.apk@(suffix)
-// eg /a/b/c.apk to /data/resource-cache/a@b@c.apk@idmap
-static int flatten_path(const char *prefix, const char *suffix,
- const char *overlay_path, char *idmap_path, size_t N)
-{
- if (overlay_path == nullptr || idmap_path == nullptr) {
- return -1;
- }
- const size_t len_overlay_path = strlen(overlay_path);
- // will access overlay_path + 1 further below; requires absolute path
- if (len_overlay_path < 2 || *overlay_path != '/') {
- return -1;
- }
- const size_t len_idmap_root = strlen(prefix);
- const size_t len_suffix = strlen(suffix);
- if (SIZE_MAX - len_idmap_root < len_overlay_path ||
- SIZE_MAX - (len_idmap_root + len_overlay_path) < len_suffix) {
- // additions below would cause overflow
- return -1;
- }
- if (N < len_idmap_root + len_overlay_path + len_suffix) {
- return -1;
- }
- memset(idmap_path, 0, N);
- snprintf(idmap_path, N, "%s%s%s", prefix, overlay_path + 1, suffix);
- char *ch = idmap_path + len_idmap_root;
- while (*ch != '\0') {
- if (*ch == '/') {
- *ch = '@';
- }
- ++ch;
- }
- return 0;
-}
-
-binder::Status InstalldNativeService::idmap(const std::string& targetApkPath,
- const std::string& overlayApkPath, int32_t uid) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(targetApkPath);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* target_apk = targetApkPath.c_str();
- const char* overlay_apk = overlayApkPath.c_str();
- ALOGV("idmap target_apk=%s overlay_apk=%s uid=%d\n", target_apk, overlay_apk, uid);
-
- int idmap_fd = -1;
- char idmap_path[PATH_MAX];
- struct stat idmap_stat;
- bool outdated = false;
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- goto fail;
- }
-
- if (stat(idmap_path, &idmap_stat) < 0) {
- outdated = true;
- } else {
- outdated = delete_stale_idmap(target_apk, overlay_apk, idmap_path, uid);
- }
-
- if (outdated) {
- idmap_fd = open(idmap_path, O_RDWR | O_CREAT | O_EXCL, 0644);
- } else {
- idmap_fd = open(idmap_path, O_RDWR);
- }
-
- if (idmap_fd < 0) {
- ALOGE("idmap cannot open '%s' for output: %s\n", idmap_path, strerror(errno));
- goto fail;
- }
- if (fchown(idmap_fd, AID_SYSTEM, uid) < 0) {
- ALOGE("idmap cannot chown '%s'\n", idmap_path);
- goto fail;
- }
- if (fchmod(idmap_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) < 0) {
- ALOGE("idmap cannot chmod '%s'\n", idmap_path);
- goto fail;
- }
-
- if (!outdated) {
- close(idmap_fd);
- return ok();
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- ALOGE("setgid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (setuid(uid) != 0) {
- ALOGE("setuid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- ALOGE("flock(%s) failed during idmap: %s\n", idmap_path, strerror(errno));
- exit(1);
- }
-
- run_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to idmap failed */
- } else {
- int status = wait_child(pid);
- if (status != 0) {
- ALOGE("idmap failed, status=0x%04x\n", status);
- goto fail;
- }
- }
-
- close(idmap_fd);
- return ok();
-fail:
- if (idmap_fd >= 0) {
- close(idmap_fd);
- unlink(idmap_path);
- }
- return error();
-}
-
-binder::Status InstalldNativeService::removeIdmap(const std::string& overlayApkPath) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* overlay_apk = overlayApkPath.c_str();
- char idmap_path[PATH_MAX];
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- return error();
- }
- if (unlink(idmap_path) < 0) {
- ALOGE("couldn't unlink idmap file %s\n", idmap_path);
- return error();
- }
- return ok();
-}
-
binder::Status InstalldNativeService::restoreconAppData(const std::unique_ptr<std::string>& uuid,
const std::string& packageName, int32_t userId, int32_t flags, int32_t appId,
const std::string& seInfo) {
diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h
index 2b7bf33..ef91bf8 100644
--- a/cmds/installd/InstalldNativeService.h
+++ b/cmds/installd/InstalldNativeService.h
@@ -119,9 +119,6 @@
binder::Status destroyProfileSnapshot(const std::string& packageName,
const std::string& profileName);
- binder::Status idmap(const std::string& targetApkPath, const std::string& overlayApkPath,
- int32_t uid);
- binder::Status removeIdmap(const std::string& overlayApkPath);
binder::Status rmPackageDir(const std::string& packageDir);
binder::Status markBootComplete(const std::string& instructionSet);
binder::Status freeCache(const std::unique_ptr<std::string>& uuid, int64_t targetFreeBytes,
diff --git a/cmds/installd/binder/android/os/IInstalld.aidl b/cmds/installd/binder/android/os/IInstalld.aidl
index d99bcc8..6cc4bde 100644
--- a/cmds/installd/binder/android/os/IInstalld.aidl
+++ b/cmds/installd/binder/android/os/IInstalld.aidl
@@ -72,8 +72,6 @@
@utf8InCpp String profileName, @utf8InCpp String classpath);
void destroyProfileSnapshot(@utf8InCpp String packageName, @utf8InCpp String profileName);
- void idmap(@utf8InCpp String targetApkPath, @utf8InCpp String overlayApkPath, int uid);
- void removeIdmap(@utf8InCpp String overlayApkPath);
void rmPackageDir(@utf8InCpp String packageDir);
void markBootComplete(@utf8InCpp String instructionSet);
void freeCache(@nullable @utf8InCpp String uuid, long targetFreeBytes,
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 616c3b2..f95e445 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -339,6 +339,10 @@
? "dalvik.vm.dex2oat-threads"
: "dalvik.vm.boot-dex2oat-threads";
std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s");
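+    // Select the dex2oat CPU-set property based on whether boot has completed, mirroring the thread-count selection above.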
+ const char* cpu_set_property = post_bootcomplete
+ ? "dalvik.vm.dex2oat-cpu-set"
+ : "dalvik.vm.boot-dex2oat-cpu-set";
+ std::string dex2oat_cpu_set_arg = MapPropertyToArg(cpu_set_property, "--cpu-set=%s");
std::string bootclasspath;
char* dex2oat_bootclasspath = getenv("DEX2OATBOOTCLASSPATH");
@@ -518,6 +522,7 @@
AddArg(image_block_size_arg);
AddArg(dex2oat_compiler_filter_arg);
AddArg(dex2oat_threads_arg);
+ AddArg(dex2oat_cpu_set_arg);
AddArg(dex2oat_swap_fd);
AddArg(dex2oat_image_fd);
diff --git a/cmds/installd/migrate_legacy_obb_data.sh b/cmds/installd/migrate_legacy_obb_data.sh
index 0e6d7b9..7399681 100644
--- a/cmds/installd/migrate_legacy_obb_data.sh
+++ b/cmds/installd/migrate_legacy_obb_data.sh
@@ -15,17 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-rm -rf /data/media/Android/obb/test_probe
-mkdir -p /data/media/Android/obb/
-touch /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
+mkdir -p /data/media/0/Android/obb/
+touch /data/media/0/Android/obb/test_probe
if ! test -f /data/media/0/Android/obb/test_probe ; then
log -p i -t migrate_legacy_obb_data "No support for 'unshared_obb'. Not migrating"
- rm -rf /data/media/Android/obb/test_probe
+ rm -rf /data/media/0/Android/obb/test_probe
exit 0
fi
# Delete the test file, and remove the obb folder if it is empty
-rm -rf /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
rmdir /data/media/obb
if ! test -d /data/media/obb ; then
diff --git a/cmds/installd/otapreopt.cpp b/cmds/installd/otapreopt.cpp
index db36ce3..eefbe4f 100644
--- a/cmds/installd/otapreopt.cpp
+++ b/cmds/installd/otapreopt.cpp
@@ -480,6 +480,10 @@
"-j",
false,
cmd);
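+    // Also pass dalvik.vm.image-dex2oat-cpu-set through to dex2oat as --cpu-set.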
+ AddCompilerOptionFromSystemProperty("dalvik.vm.image-dex2oat-cpu-set",
+ "--cpu-set=",
+ false,
+ cmd);
AddCompilerOptionFromSystemProperty(
StringPrintf("dalvik.vm.isa.%s.variant", isa).c_str(),
"--instruction-set-variant=",
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 861401c..141171b 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -68,7 +68,15 @@
}
#endif // !VENDORSERVICEMANAGER
-ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {}
+ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
+#ifndef VENDORSERVICEMANAGER
+    // Can process these at any time; we don't want to delay the first VINTF client.
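+    // Fetching the manifests here warms the VintfObject caches in the background.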
+ std::thread([] {
+ vintf::VintfObject::GetDeviceHalManifest();
+ vintf::VintfObject::GetFrameworkHalManifest();
+ }).detach();
+#endif // !VENDORSERVICEMANAGER
+}
ServiceManager::~ServiceManager() {
// this should only happen in tests
@@ -306,7 +314,7 @@
if (listeners.empty()) {
*it = mNameToCallback.erase(*it);
} else {
- it++;
+ (*it)++;
}
}
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index 4f47db1..bac8b66 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -280,19 +280,31 @@
std::condition_variable mCv;
};
+    // Simple RAII object that ensures a function is called immediately before it goes out of scope.
+ class Defer {
+ public:
+ Defer(std::function<void()>&& f) : mF(std::move(f)) {}
+ ~Defer() { mF(); }
+ private:
+ std::function<void()> mF;
+ };
+
const std::string name = String8(name16).c_str();
sp<IBinder> out;
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
sp<Waiter> waiter = new Waiter;
if (!mTheRealServiceManager->registerForNotifications(
name, waiter).isOk()) {
return nullptr;
}
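+    // Make sure the waiter is unregistered from notifications on every return path below.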
+ Defer unregister ([&] {
+ mTheRealServiceManager->unregisterForNotifications(name, waiter);
+ });
while(true) {
{
@@ -316,7 +328,7 @@
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
ALOGW("Waited one second for %s", name.c_str());
}
diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp
index 3f47f3b..37c0d77 100644
--- a/libs/binder/ProcessState.cpp
+++ b/libs/binder/ProcessState.cpp
@@ -385,6 +385,12 @@
, mThreadPoolSeq(1)
, mCallRestriction(CallRestriction::NONE)
{
+
+// TODO(b/139016109): enforce in build system
+#if defined(__ANDROID_APEX__) && !defined(__ANDROID_APEX_COM_ANDROID_VNDK_CURRENT__)
+ LOG_ALWAYS_FATAL("Cannot use libbinder in APEX (only system.img libbinder) since it is not stable.");
+#endif
+
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
diff --git a/libs/binder/Status.cpp b/libs/binder/Status.cpp
index 0ad99ce..674f065 100644
--- a/libs/binder/Status.cpp
+++ b/libs/binder/Status.cpp
@@ -232,9 +232,10 @@
ret.append("No error");
} else {
ret.appendFormat("Status(%d, %s): '", mException, exceptionToString(mException).c_str());
- if (mException == EX_SERVICE_SPECIFIC ||
- mException == EX_TRANSACTION_FAILED) {
+ if (mException == EX_SERVICE_SPECIFIC) {
ret.appendFormat("%d: ", mErrorCode);
+ } else if (mException == EX_TRANSACTION_FAILED) {
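+            // For failed transactions, mErrorCode holds a binder status_t, so print its symbolic name.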
+ ret.appendFormat("%s: ", statusToString(mErrorCode).c_str());
}
ret.append(String8(mMessage));
ret.append("'");
diff --git a/libs/binder/aidl/android/os/IServiceManager.aidl b/libs/binder/aidl/android/os/IServiceManager.aidl
index 8c7ebba..b965881 100644
--- a/libs/binder/aidl/android/os/IServiceManager.aidl
+++ b/libs/binder/aidl/android/os/IServiceManager.aidl
@@ -58,7 +58,7 @@
* Returns null if the service does not exist.
*/
@UnsupportedAppUsage
- IBinder getService(@utf8InCpp String name);
+ @nullable IBinder getService(@utf8InCpp String name);
/**
* Retrieve an existing service called @a name from the service
@@ -66,7 +66,7 @@
* exist.
*/
@UnsupportedAppUsage
- IBinder checkService(@utf8InCpp String name);
+ @nullable IBinder checkService(@utf8InCpp String name);
/**
* Place a new @a service called @a name into the service
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder.h b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
index 4d5c044..4560f22 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
@@ -34,6 +34,12 @@
#include <android/binder_status.h>
__BEGIN_DECLS
+
+#ifndef __ANDROID_API__
+#error Android builds must be compiled against a specific API. If this is an \
+ android platform host build, you must use libbinder_ndk_host_user.
+#endif
+
#if __ANDROID_API__ >= 29
// Also see TF_* in kernel's binder.h
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
index f3bc31b..7871667 100644
--- a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
@@ -441,6 +441,42 @@
}
/**
+ * Writes a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * to 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_writeStdVectorParcelableElement<ScopedFileDescriptor>(
+ AParcel* parcel, const void* vectorData, size_t index) {
+ const std::vector<ScopedFileDescriptor>* vector =
+ static_cast<const std::vector<ScopedFileDescriptor>*>(vectorData);
+ int writeFd = vector->at(index).get();
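+    // A negative fd represents a null ScopedFileDescriptor, which is unexpected here.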
+ if (writeFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ return AParcel_writeParcelFileDescriptor(parcel, writeFd);
+}
+
+/**
+ * Reads a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * from 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_readStdVectorParcelableElement<ScopedFileDescriptor>(
+ const AParcel* parcel, void* vectorData, size_t index) {
+ std::vector<ScopedFileDescriptor>* vector =
+ static_cast<std::vector<ScopedFileDescriptor>*>(vectorData);
+ int readFd;
+ binder_status_t status = AParcel_readParcelFileDescriptor(parcel, &readFd);
+ if (status == STATUS_OK) {
+ if (readFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ vector->at(index).set(readFd);
+ }
+ return status;
+}
+
+/**
* Convenience API for writing a std::vector<P>
*/
template <typename P>
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index db4a36b..94ab9f0 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -1034,9 +1034,9 @@
binder_buffer_object obj {
.hdr = { .type = BINDER_TYPE_PTR },
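+        // Keep designated initializers in the struct's declaration order (flags precedes buffer).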
+ .flags = 0,
.buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
.length = 4,
- .flags = 0,
};
data.setDataCapacity(1024);
// Write a bogus object at offset 0 to get an entry in the offset table
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 45fea85..4ee9f55 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -397,7 +397,7 @@
if (deleteMapEntry(gTisMapFd, &key) && errno != ENOENT) return false;
}
- concurrent_val_t czeros = {.policy = {0}, .active = {0}};
+ concurrent_val_t czeros = { .active = {0}, .policy = {0}, };
std::vector<concurrent_val_t> cvals(gNCpus, czeros);
for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
if (writeToMapEntry(gConcurrentMapFd, &key, cvals.data(), BPF_EXIST) && errno != ENOENT)
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 250f902..56b94c1 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -62,6 +62,9 @@
"android.hardware.sensors@1.0::ISensors",
"android.hardware.thermal@2.0::IThermal",
"android.hardware.vr@1.0::IVr",
+ "android.hardware.automotive.audiocontrol@1.0::IAudioControl",
+ "android.hardware.automotive.vehicle@2.0::IVehicle",
+ "android.hardware.automotive.evs@1.0::IEvsCamera",
NULL,
};
diff --git a/libs/nativewindow/Android.bp b/libs/nativewindow/Android.bp
index 27ab482..55400c7 100644
--- a/libs/nativewindow/Android.bp
+++ b/libs/nativewindow/Android.bp
@@ -85,6 +85,11 @@
export_header_lib_headers: [
"libnativebase_headers",
],
+
+ stubs: {
+ symbol_file: "libnativewindow.map.txt",
+ versions: ["29"],
+ },
}
llndk_library {
diff --git a/libs/nativewindow/libnativewindow.map.txt b/libs/nativewindow/libnativewindow.map.txt
index daf1dcc..f59e8f0 100644
--- a/libs/nativewindow/libnativewindow.map.txt
+++ b/libs/nativewindow/libnativewindow.map.txt
@@ -2,9 +2,9 @@
global:
AHardwareBuffer_acquire;
AHardwareBuffer_allocate;
- AHardwareBuffer_createFromHandle; # llndk
+ AHardwareBuffer_createFromHandle; # llndk # apex
AHardwareBuffer_describe;
- AHardwareBuffer_getNativeHandle; # llndk
+ AHardwareBuffer_getNativeHandle; # llndk # apex
AHardwareBuffer_isSupported; # introduced=29
AHardwareBuffer_lock;
AHardwareBuffer_lockAndGetInfo; # introduced=29
diff --git a/libs/nativewindow/tests/AHardwareBufferTest.cpp b/libs/nativewindow/tests/AHardwareBufferTest.cpp
index cc2731d..71b1f9f 100644
--- a/libs/nativewindow/tests/AHardwareBufferTest.cpp
+++ b/libs/nativewindow/tests/AHardwareBufferTest.cpp
@@ -20,6 +20,7 @@
#include <android/hardware_buffer.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <android/hardware/graphics/common/1.0/types.h>
+#include <vndk/hardware_buffer.h>
#include <gtest/gtest.h>
@@ -100,9 +101,33 @@
(uint64_t)BufferUsage::CPU_WRITE_RARELY,
AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
-EXPECT_TRUE(TestUsageConversion(
+ EXPECT_TRUE(TestUsageConversion(
(uint64_t)BufferUsage::GPU_RENDER_TARGET | (uint64_t)BufferUsage::GPU_TEXTURE |
- 1ull << 29 | 1ull << 57,
+ 1ull << 29 | 1ull << 57,
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_VENDOR_1 | AHARDWAREBUFFER_USAGE_VENDOR_13));
}
+
+TEST(AHardwareBufferTest, GetCreateHandleTest) {
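+    // Allocate a small blob buffer, grab its native handle, and clone a second AHardwareBuffer from that handle.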
+ AHardwareBuffer_Desc desc{
+ .width = 64,
+ .height = 1,
+ .layers = 1,
+ .format = AHARDWAREBUFFER_FORMAT_BLOB,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ .stride = 64,
+ };
+
+ AHardwareBuffer* buffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_allocate(&desc, &buffer));
+ const native_handle_t* handle = AHardwareBuffer_getNativeHandle(buffer);
+ EXPECT_NE(nullptr, handle);
+
+ AHardwareBuffer* otherBuffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_createFromHandle(
+ &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &otherBuffer));
+ EXPECT_NE(nullptr, otherBuffer);
+
+ AHardwareBuffer_release(buffer);
+ AHardwareBuffer_release(otherBuffer);
+}
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index b706a74..8863ec2 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -50,43 +50,35 @@
public:
FakeInputDispatcherPolicy() {
- mInputEventFiltered = false;
- mTime = -1;
- mAction = -1;
- mDisplayId = -1;
mOnPointerDownToken.clear();
}
- void assertFilterInputEventWasCalledWithExpectedArgs(const NotifyMotionArgs* args) {
- ASSERT_TRUE(mInputEventFiltered)
- << "Expected filterInputEvent() to have been called.";
+ void assertFilterInputEventWasCalled(const NotifyKeyArgs& args) {
+ ASSERT_NE(nullptr, mFilteredEvent) << "Expected filterInputEvent() to have been called.";
+ ASSERT_EQ(mFilteredEvent->getType(), AINPUT_EVENT_TYPE_KEY);
- ASSERT_EQ(mTime, args->eventTime)
- << "Expected time of filtered event was not matched";
- ASSERT_EQ(mAction, args->action)
- << "Expected action of filtered event was not matched";
- ASSERT_EQ(mDisplayId, args->displayId)
- << "Expected displayId of filtered event was not matched";
+ const KeyEvent& keyEvent = static_cast<const KeyEvent&>(*mFilteredEvent);
+ ASSERT_EQ(keyEvent.getEventTime(), args.eventTime);
+ ASSERT_EQ(keyEvent.getAction(), args.action);
+ ASSERT_EQ(keyEvent.getDisplayId(), args.displayId);
reset();
}
- void assertFilterInputEventWasCalledWithExpectedArgs(const NotifyKeyArgs* args) {
- ASSERT_TRUE(mInputEventFiltered)
- << "Expected filterInputEvent() to have been called.";
+ void assertFilterInputEventWasCalled(const NotifyMotionArgs& args) {
+ ASSERT_NE(nullptr, mFilteredEvent) << "Expected filterInputEvent() to have been called.";
+ ASSERT_EQ(mFilteredEvent->getType(), AINPUT_EVENT_TYPE_MOTION);
- ASSERT_EQ(mTime, args->eventTime)
- << "Expected time of filtered event was not matched";
- ASSERT_EQ(mAction, args->action)
- << "Expected action of filtered event was not matched";
- ASSERT_EQ(mDisplayId, args->displayId)
- << "Expected displayId of filtered event was not matched";
+ const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*mFilteredEvent);
+ ASSERT_EQ(motionEvent.getEventTime(), args.eventTime);
+ ASSERT_EQ(motionEvent.getAction(), args.action);
+ ASSERT_EQ(motionEvent.getDisplayId(), args.displayId);
reset();
}
void assertFilterInputEventWasNotCalled() {
- ASSERT_FALSE(mInputEventFiltered)
+ ASSERT_EQ(nullptr, mFilteredEvent)
<< "Expected filterInputEvent() to not have been called.";
}
@@ -97,10 +89,7 @@
}
private:
- bool mInputEventFiltered;
- nsecs_t mTime;
- int32_t mAction;
- int32_t mDisplayId;
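+    // Copy of the most recent event passed to filterInputEvent(), if any.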
+ std::unique_ptr<InputEvent> mFilteredEvent;
sp<IBinder> mOnPointerDownToken;
virtual void notifyConfigurationChanged(nsecs_t) {
@@ -122,26 +111,20 @@
*outConfig = mConfig;
}
- virtual bool filterInputEvent(const InputEvent* inputEvent, uint32_t policyFlags) {
+ virtual bool filterInputEvent(const InputEvent* inputEvent, uint32_t policyFlags) override {
switch (inputEvent->getType()) {
case AINPUT_EVENT_TYPE_KEY: {
const KeyEvent* keyEvent = static_cast<const KeyEvent*>(inputEvent);
- mTime = keyEvent->getEventTime();
- mAction = keyEvent->getAction();
- mDisplayId = keyEvent->getDisplayId();
+ mFilteredEvent = std::make_unique<KeyEvent>(*keyEvent);
break;
}
case AINPUT_EVENT_TYPE_MOTION: {
const MotionEvent* motionEvent = static_cast<const MotionEvent*>(inputEvent);
- mTime = motionEvent->getEventTime();
- mAction = motionEvent->getAction();
- mDisplayId = motionEvent->getDisplayId();
+ mFilteredEvent = std::make_unique<MotionEvent>(*motionEvent);
break;
}
}
-
- mInputEventFiltered = true;
return true;
}
@@ -176,10 +159,7 @@
}
void reset() {
- mInputEventFiltered = false;
- mTime = -1;
- mAction = -1;
- mDisplayId = -1;
+ mFilteredEvent = nullptr;
mOnPointerDownToken.clear();
}
};
@@ -385,55 +365,87 @@
class FakeInputReceiver {
public:
- void consumeEvent(int32_t expectedEventType, int32_t expectedDisplayId,
- int32_t expectedFlags = 0) {
+ InputEvent* consume() {
uint32_t consumeSeq;
InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer consume should return OK.";
- ASSERT_TRUE(event != nullptr)
- << mName.c_str() << ": consumer should have returned non-NULL event.";
+ std::chrono::time_point start = std::chrono::steady_clock::now();
+ status_t status = WOULD_BLOCK;
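+        // Poll for an event, giving the dispatcher up to 100ms to deliver it.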
+ while (status == WOULD_BLOCK) {
+ status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1, &consumeSeq,
+ &event);
+ std::chrono::duration elapsed = std::chrono::steady_clock::now() - start;
+ if (elapsed > 100ms) {
+ break;
+ }
+ }
+
+ if (status == WOULD_BLOCK) {
+ // Just means there's no event available.
+ return nullptr;
+ }
+
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer consume should return OK.";
+ return nullptr;
+ }
+ if (event == nullptr) {
+ ADD_FAILURE() << "Consumed correctly, but received NULL event from consumer";
+ return nullptr;
+ }
+
+ status = mConsumer->sendFinishedSignal(consumeSeq, handled());
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ }
+ return event;
+ }
+
+ void consumeEvent(int32_t expectedEventType, int32_t expectedAction, int32_t expectedDisplayId,
+ int32_t expectedFlags) {
+ InputEvent* event = consume();
+
+ ASSERT_NE(nullptr, event) << mName.c_str()
+ << ": consumer should have returned non-NULL event.";
ASSERT_EQ(expectedEventType, event->getType())
<< mName.c_str() << ": event type should match.";
- ASSERT_EQ(expectedDisplayId, event->getDisplayId())
- << mName.c_str() << ": event displayId should be the same as expected.";
+ EXPECT_EQ(expectedDisplayId, event->getDisplayId());
- int32_t flags;
switch (expectedEventType) {
case AINPUT_EVENT_TYPE_KEY: {
- KeyEvent* typedEvent = static_cast<KeyEvent*>(event);
- flags = typedEvent->getFlags();
+ const KeyEvent& keyEvent = static_cast<const KeyEvent&>(*event);
+ EXPECT_EQ(expectedAction, keyEvent.getAction());
+ EXPECT_EQ(expectedFlags, keyEvent.getFlags());
break;
}
case AINPUT_EVENT_TYPE_MOTION: {
- MotionEvent* typedEvent = static_cast<MotionEvent*>(event);
- flags = typedEvent->getFlags();
+ const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*event);
+ EXPECT_EQ(expectedAction, motionEvent.getAction());
+ EXPECT_EQ(expectedFlags, motionEvent.getFlags());
break;
}
default: {
FAIL() << mName.c_str() << ": invalid event type: " << expectedEventType;
}
}
- ASSERT_EQ(expectedFlags, flags)
- << mName.c_str() << ": event flags should be the same as expected.";
+ }
- status = mConsumer->sendFinishedSignal(consumeSeq, handled());
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ void consumeKeyDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
+ }
+
+ void consumeMotionDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_MOTION, AMOTION_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
}
void assertNoEvents() {
- uint32_t consumeSeq;
- InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_NE(OK, status)
+ InputEvent* event = consume();
+ ASSERT_EQ(nullptr, event)
<< mName.c_str()
- << ": should not have received any events, so consume(..) should not return OK.";
+ << ": should not have received any events, so consume() should return NULL";
}
protected:
@@ -631,7 +643,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Window should receive motion event.
- window->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ window->consumeMotionDown(ADISPLAY_ID_DEFAULT);
}
// The foreground window should receive the first touch down event.
@@ -652,7 +664,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top window should receive the touch down event. Second window should not receive anything.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowTop->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowSecond->assertNoEvents();
}
@@ -678,7 +690,7 @@
// Focused window should receive event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, SetInputWindow_FocusPriority) {
@@ -703,7 +715,7 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top focused window should receive event.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowTop->consumeKeyDown(ADISPLAY_ID_NONE);
windowSecond->assertNoEvents();
}
@@ -733,7 +745,7 @@
// Top window is invalid, so it should not receive any input event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, DispatchMouseEventsUnderCursor) {
@@ -758,7 +770,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED,
injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_MOUSE,
ADISPLAY_ID_DEFAULT, 610, 400, 599, 400));
- windowLeft->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowLeft->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowRight->assertNoEvents();
}
@@ -814,7 +826,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test touch down on second display.
@@ -822,29 +834,29 @@
AINPUT_SOURCE_TOUCHSCREEN, SECOND_DISPLAY_ID))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
}
TEST_F(InputDispatcherFocusOnTwoDisplaysTest, SetInputWindow_MultiDisplayFocus) {
// Test inject a key down with display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher, ADISPLAY_ID_DEFAULT))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeKeyDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test inject a key down without display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
// Remove secondary display.
std::vector<sp<InputWindowHandle>> noWindows;
mDispatcher->setInputWindows(noWindows, SECOND_DISPLAY_ID);
// Expect old focus should receive a cancel event.
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE,
- AKEY_EVENT_FLAG_CANCELED);
+ windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_UP, ADISPLAY_ID_NONE,
+ AKEY_EVENT_FLAG_CANCELED);
// Test inject a key down, should timeout because of no target window.
ASSERT_EQ(INPUT_EVENT_INJECTION_TIMED_OUT, injectKeyDown(mDispatcher))
@@ -873,8 +885,8 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
- monitorInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
+ monitorInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
monitorInSecondary->assertNoEvents();
@@ -884,8 +896,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
+ monitorInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
// Test inject a non-pointer motion event.
// If specific a display, it will dispatch to the focused window of particular display,
@@ -895,8 +907,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
}
// Test per-display input monitors for key event.
@@ -912,8 +924,8 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
}
class InputFilterTest : public InputDispatcherTest {
@@ -931,7 +943,7 @@
mDispatcher->notifyMotion(&motionArgs);
if (expectToBeFiltered) {
- mFakePolicy->assertFilterInputEventWasCalledWithExpectedArgs(&motionArgs);
+ mFakePolicy->assertFilterInputEventWasCalled(motionArgs);
} else {
mFakePolicy->assertFilterInputEventWasNotCalled();
}
@@ -946,7 +958,7 @@
mDispatcher->notifyKey(&keyArgs);
if (expectToBeFiltered) {
- mFakePolicy->assertFilterInputEventWasCalledWithExpectedArgs(&keyArgs);
+ mFakePolicy->assertFilterInputEventWasCalled(keyArgs);
} else {
mFakePolicy->assertFilterInputEventWasNotCalled();
}
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index f529a44..4b71bd8 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -165,7 +165,9 @@
"Scheduler/PhaseOffsets.cpp",
"Scheduler/Scheduler.cpp",
"Scheduler/SchedulerUtils.cpp",
+ "Scheduler/Timer.cpp",
"Scheduler/VSyncDispatchTimerQueue.cpp",
+ "Scheduler/VSyncPredictor.cpp",
"Scheduler/VSyncModulator.cpp",
"StartPropertySetThread.cpp",
"SurfaceFlinger.cpp",
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index a25709c..94c4a81 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -77,9 +77,9 @@
// with the clone layer trying to use the deleted texture.
mFlinger->deleteTextureAsync(mTextureName);
}
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
void BufferLayer::useSurfaceDamage() {
@@ -286,7 +286,7 @@
return hasReadyFrame();
}
-bool BufferLayer::onPostComposition(const std::optional<DisplayId>& displayId,
+bool BufferLayer::onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) {
@@ -305,8 +305,16 @@
nsecs_t desiredPresentTime = mBufferInfo.mDesiredPresentTime;
mFrameTracker.setDesiredPresentTime(desiredPresentTime);
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->setDesiredTime(layerID, mCurrentFrameNumber, desiredPresentTime);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->setDesiredTime(layerId, mCurrentFrameNumber, desiredPresentTime);
+
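+    // If this frame was composed on the client (GPU) path, trace when it was queued for client composition.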
+ const auto outputLayer = findOutputLayerForDisplay(displayDevice);
+ if (outputLayer && outputLayer->requiresClientComposition()) {
+ nsecs_t clientCompositionTimestamp = outputLayer->getState().clientCompositionTimestamp;
+ mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
+ clientCompositionTimestamp,
+ FrameTracer::FrameEvent::FALLBACK_COMPOSITION);
+ }
std::shared_ptr<FenceTime> frameReadyFence = mBufferInfo.mFenceTime;
if (frameReadyFence->isValid()) {
@@ -317,17 +325,18 @@
mFrameTracker.setFrameReadyTime(desiredPresentTime);
}
+ const auto displayId = displayDevice->getId();
if (presentFence->isValid()) {
- mFlinger->mTimeStats->setPresentFence(layerID, mCurrentFrameNumber, presentFence);
- mFlinger->mFrameTracer->traceFence(layerID, getCurrentBufferId(), mCurrentFrameNumber,
+ mFlinger->mTimeStats->setPresentFence(layerId, mCurrentFrameNumber, presentFence);
+ mFlinger->mFrameTracer->traceFence(layerId, getCurrentBufferId(), mCurrentFrameNumber,
presentFence, FrameTracer::FrameEvent::PRESENT_FENCE);
mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
} else if (displayId && mFlinger->getHwComposer().isConnected(*displayId)) {
// The HWC doesn't support present fences, so use the refresh
// timestamp instead.
const nsecs_t actualPresentTime = mFlinger->getHwComposer().getRefreshTimestamp(*displayId);
- mFlinger->mTimeStats->setPresentTime(layerID, mCurrentFrameNumber, actualPresentTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, getCurrentBufferId(), mCurrentFrameNumber,
+ mFlinger->mTimeStats->setPresentTime(layerId, mCurrentFrameNumber, actualPresentTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
actualPresentTime,
FrameTracer::FrameEvent::PRESENT_FENCE);
mFrameTracker.setActualPresentTime(actualPresentTime);
diff --git a/services/surfaceflinger/BufferLayer.h b/services/surfaceflinger/BufferLayer.h
index 656ba12..16855d2 100644
--- a/services/surfaceflinger/BufferLayer.h
+++ b/services/surfaceflinger/BufferLayer.h
@@ -78,7 +78,7 @@
bool isHdrY410() const override;
- bool onPostComposition(const std::optional<DisplayId>& displayId,
+ bool onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) override;
diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp
index 6896da7..d51d34b 100644
--- a/services/surfaceflinger/BufferQueueLayer.cpp
+++ b/services/surfaceflinger/BufferQueueLayer.cpp
@@ -223,7 +223,7 @@
// BufferItem's that weren't actually queued. This can happen in shared
// buffer mode.
bool queuedBuffer = false;
- const int32_t layerID = getSequence();
+ const int32_t layerId = getSequence();
LayerRejecter r(mDrawingState, getCurrentState(), recomputeVisibleRegions,
getProducerStickyTransform() != 0, mName, mOverrideScalingMode,
getTransformToDisplayInverse());
@@ -264,7 +264,7 @@
if (queuedBuffer) {
Mutex::Autolock lock(mQueueItemLock);
mConsumer->mergeSurfaceDamage(mQueueItems[0].mSurfaceDamage);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mQueueItems[0].mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mQueueItems[0].mFrameNumber);
mQueueItems.removeAt(0);
mQueuedFrames--;
}
@@ -278,8 +278,8 @@
Mutex::Autolock lock(mQueueItemLock);
mQueueItems.clear();
mQueuedFrames = 0;
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
// Once we have hit this state, the shadow queue may no longer
@@ -301,19 +301,17 @@
// updateTexImage
while (mQueueItems[0].mFrameNumber != currentFrameNumber) {
mConsumer->mergeSurfaceDamage(mQueueItems[0].mSurfaceDamage);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mQueueItems[0].mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mQueueItems[0].mFrameNumber);
mQueueItems.removeAt(0);
mQueuedFrames--;
}
uint64_t bufferID = mQueueItems[0].mGraphicBuffer->getId();
- mFlinger->mTimeStats->setAcquireFence(layerID, currentFrameNumber,
- mQueueItems[0].mFenceTime);
- mFlinger->mFrameTracer->traceFence(layerID, bufferID, currentFrameNumber,
+ mFlinger->mFrameTracer->traceFence(layerId, bufferID, currentFrameNumber,
mQueueItems[0].mFenceTime,
FrameTracer::FrameEvent::ACQUIRE_FENCE);
- mFlinger->mTimeStats->setLatchTime(layerID, currentFrameNumber, latchTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferID, currentFrameNumber, latchTime,
+ mFlinger->mTimeStats->setLatchTime(layerId, currentFrameNumber, latchTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferID, currentFrameNumber, latchTime,
FrameTracer::FrameEvent::LATCH);
mQueueItems.removeAt(0);
@@ -373,28 +371,28 @@
// -----------------------------------------------------------------------
void BufferQueueLayer::onFrameDequeued(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::DEQUEUE);
}
void BufferQueueLayer::onFrameDetached(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::DETACH);
}
void BufferQueueLayer::onFrameCancelled(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::CANCEL);
}
void BufferQueueLayer::onFrameAvailable(const BufferItem& item) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerID, item.mGraphicBuffer->getId(), item.mFrameNumber,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, item.mGraphicBuffer->getId(), item.mFrameNumber,
systemTime(), FrameTracer::FrameEvent::QUEUE);
ATRACE_CALL();
diff --git a/services/surfaceflinger/BufferStateLayer.cpp b/services/surfaceflinger/BufferStateLayer.cpp
index 33cd0dc..d68fe8e 100644
--- a/services/surfaceflinger/BufferStateLayer.cpp
+++ b/services/surfaceflinger/BufferStateLayer.cpp
@@ -240,10 +240,10 @@
mCurrentState.modified = true;
setTransactionFlags(eTransactionNeeded);
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->setPostTime(layerID, mFrameNumber, getName().c_str(), postTime);
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, buffer->getId(), mFrameNumber, postTime,
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->setPostTime(layerId, mFrameNumber, getName().c_str(), postTime);
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, buffer->getId(), mFrameNumber, postTime,
FrameTracer::FrameEvent::POST);
mCurrentState.desiredPresentTime = desiredPresentTime;
@@ -458,7 +458,7 @@
return NO_ERROR;
}
- const int32_t layerID = getSequence();
+ const int32_t layerId = getSequence();
// Reject if the layer is invalid
uint32_t bufferWidth = s.buffer->width;
@@ -480,7 +480,7 @@
ALOGE("[%s] rejecting buffer: "
"bufferWidth=%d, bufferHeight=%d, front.active.{w=%d, h=%d}",
getDebugName(), bufferWidth, bufferHeight, s.active.w, s.active.h);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mFrameNumber);
return BAD_VALUE;
}
@@ -497,18 +497,18 @@
// a GL-composited layer) not at all.
status_t err = bindTextureImage();
if (err != NO_ERROR) {
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
return BAD_VALUE;
}
}
const uint64_t bufferID = getCurrentBufferId();
- mFlinger->mTimeStats->setAcquireFence(layerID, mFrameNumber, mBufferInfo.mFenceTime);
- mFlinger->mFrameTracer->traceFence(layerID, bufferID, mFrameNumber, mBufferInfo.mFenceTime,
+ mFlinger->mTimeStats->setAcquireFence(layerId, mFrameNumber, mBufferInfo.mFenceTime);
+ mFlinger->mFrameTracer->traceFence(layerId, bufferID, mFrameNumber, mBufferInfo.mFenceTime,
FrameTracer::FrameEvent::ACQUIRE_FENCE);
- mFlinger->mTimeStats->setLatchTime(layerID, mFrameNumber, latchTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferID, mFrameNumber, latchTime,
+ mFlinger->mTimeStats->setLatchTime(layerId, mFrameNumber, latchTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferID, mFrameNumber, latchTime,
FrameTracer::FrameEvent::LATCH);
mCurrentStateModified = false;
diff --git a/services/surfaceflinger/ClientCache.cpp b/services/surfaceflinger/ClientCache.cpp
index 16fe27c..a5be01c 100644
--- a/services/surfaceflinger/ClientCache.cpp
+++ b/services/surfaceflinger/ClientCache.cpp
@@ -42,7 +42,7 @@
return false;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
auto bufItr = processBuffers.find(id);
if (bufItr == processBuffers.end()) {
@@ -86,12 +86,14 @@
return false;
}
auto [itr, success] =
- mBuffers.emplace(processToken, std::unordered_map<uint64_t, ClientCacheBuffer>());
+ mBuffers.emplace(processToken,
+ std::make_pair(token,
+ std::unordered_map<uint64_t, ClientCacheBuffer>()));
LOG_ALWAYS_FATAL_IF(!success, "failed to insert new process into client cache");
it = itr;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
if (processBuffers.size() > BUFFER_CACHE_MAX_SIZE) {
ALOGE("failed to cache buffer: cache is full");
@@ -120,7 +122,7 @@
}
}
- mBuffers[processToken].erase(id);
+ mBuffers[processToken].second.erase(id);
}
for (auto& recipient : pendingErase) {
@@ -180,7 +182,7 @@
return;
}
- for (auto& [id, clientCacheBuffer] : itr->second) {
+ for (auto& [id, clientCacheBuffer] : itr->second.second) {
client_cache_t cacheId = {processToken, id};
for (auto& recipient : clientCacheBuffer.recipients) {
sp<ErasedRecipient> erasedRecipient = recipient.promote();
diff --git a/services/surfaceflinger/ClientCache.h b/services/surfaceflinger/ClientCache.h
index aa6c80d..d7af7c0 100644
--- a/services/surfaceflinger/ClientCache.h
+++ b/services/surfaceflinger/ClientCache.h
@@ -61,7 +61,8 @@
std::set<wp<ErasedRecipient>> recipients;
};
std::map<wp<IBinder> /*caching process*/,
- std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>
+ std::pair<sp<IBinder> /*strong ref to caching process*/,
+ std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>>
mBuffers GUARDED_BY(mMutex);
class CacheDeathRecipient : public IBinder::DeathRecipient {
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
index 1347449..11cfccc 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
@@ -98,6 +98,9 @@
// Debugging
void dump(std::string& result) const;
+
+ // Timestamp for when the layer is queued for client composition
+ nsecs_t clientCompositionTimestamp;
};
} // namespace compositionengine::impl
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index 1953005..6877f8b 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -904,6 +904,7 @@
layerSettings.disableBlending = true;
}
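+        // Record the time this layer was queued for client composition.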
+ layer->editState().clientCompositionTimestamp = systemTime();
clientCompositionLayers.push_back(*result);
}
}
diff --git a/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h b/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h
new file mode 100644
index 0000000..2675dcf
--- /dev/null
+++ b/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * CallOrderStateMachineHelper is a helper class for setting up a compile-time
+ * checked state machine which verifies that a sequence of calls correctly and
+ * completely sets up the state for some other type.
+ *
+ * Two examples where this could be used are with setting up a "Builder" flow
+ * for initializing an instance of some type, and writing tests where the state
+ * machine sets up expectations and preconditions, calls the function under
+ * test, and then evaluates postconditions.
+ *
+ * The purpose of this helper is to offload some of the boilerplate code to
+ * simplify the actual state classes, and is also a place to document how to
+ * go about setting up the state classes.
+ *
+ * To work at compile time, the idea is that each state is a unique C++ type,
+ * and the valid transitions between states are given by member functions on
+ * those types, with those functions returning a simple value type expressing
+ * the new state to use. Illegal state transitions become a compile error because
+ * the corresponding member function does not exist.
+ *
+ * Example usage in a test:
+ *
+ * A two-step (+ terminator step) setup process can be defined using:
+ *
+ * class Step1 : public CallOrderStateMachineHelper<TestFixtureType, Step1> {
+ * [[nodiscard]] auto firstMockCalledWith(int value1) {
+ * // Set up an expectation or initial state using the fixture
+ *         EXPECT_CALL(getInstance()->firstMock, FirstCall(value1));
+ * return nextState<Step2>();
+ * }
+ * };
+ *
+ * class Step2 : public CallOrderStateMachineHelper<TestFixtureType, Step2> {
+ * [[nodiscard]] auto secondMockCalledWith(int value2) {
+ * // Set up an expectation or initial state using the fixture
+ * EXPECT_CALL(getInstance()->secondMock, SecondCall(value2));
+ * return nextState<StepExecute>();
+ * }
+ * };
+ *
+ * class StepExecute : public CallOrderStateMachineHelper<TestFixtureType, StepExecute> {
+ * void execute() {
+ * invokeFunctionUnderTest();
+ * }
+ * };
+ *
+ * Note how the non-terminator steps return by value and use [[nodiscard]] to
+ * enforce the setup flow. Only the terminator step returns void.
+ *
+ * This can then be used in the tests with:
+ *
+ * Step1::make(this).firstMockCalledWith(value1)
+ * .secondMockCalledWith(value2)
+ *         .execute();
+ *
+ * If the test fixture defines a `verify()` helper function which returns
+ * `Step1::make(this)`, this can be simplified to:
+ *
+ * verify().firstMockCalledWith(value1)
+ * .secondMockCalledWith(value2)
+ * .execute();
+ *
+ * This is equivalent to the following calls made by the test function:
+ *
+ * EXPECT_CALL(firstMock, FirstCall(value1));
+ * EXPECT_CALL(secondMock, SecondCall(value2));
+ * invokeFunctionUnderTest();
+ */
+template <typename InstanceType, typename CurrentStateType>
+class CallOrderStateMachineHelper {
+public:
+ CallOrderStateMachineHelper() = default;
+
+ // Disallow copying
+ CallOrderStateMachineHelper(const CallOrderStateMachineHelper&) = delete;
+ CallOrderStateMachineHelper& operator=(const CallOrderStateMachineHelper&) = delete;
+
+    // Moving is the intended use case.
+ CallOrderStateMachineHelper(CallOrderStateMachineHelper&&) = default;
+ CallOrderStateMachineHelper& operator=(CallOrderStateMachineHelper&&) = default;
+
+    // Using a static "make" function means the CurrentStateType classes do not
+ // need anything other than a default no-argument constructor.
+ static CurrentStateType make(InstanceType* instance) {
+ auto helper = CurrentStateType();
+ helper.mInstance = instance;
+ return helper;
+ }
+
+    // Each non-terminal state function ends by calling this to transition to the next state type.
+ template <typename NextStateType>
+ auto nextState() {
+ // Note: Further operations on the current state become undefined
+ // operations as the instance pointer is moved to the next state type.
+ // But that doesn't stop someone from storing an intermediate state
+        // instance as a local and possibly calling more than one member function
+        // on it. By swapping with nullptr, we can at least try to catch this
+        // at runtime.
+ InstanceType* instance = nullptr;
+ std::swap(instance, mInstance);
+ return NextStateType::make(instance);
+ }
+
+ InstanceType* getInstance() const { return mInstance; }
+
+private:
+ InstanceType* mInstance;
+};
diff --git a/services/surfaceflinger/CompositionEngine/tests/CompositionEngineTest.cpp b/services/surfaceflinger/CompositionEngine/tests/CompositionEngineTest.cpp
index 353a5b0..49e7c70 100644
--- a/services/surfaceflinger/CompositionEngine/tests/CompositionEngineTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/CompositionEngineTest.cpp
@@ -15,10 +15,12 @@
*/
#include <compositionengine/CompositionRefreshArgs.h>
+#include <compositionengine/LayerFECompositionState.h>
#include <compositionengine/impl/CompositionEngine.h>
#include <compositionengine/mock/Layer.h>
#include <compositionengine/mock/LayerFE.h>
#include <compositionengine/mock/Output.h>
+#include <compositionengine/mock/OutputLayer.h>
#include <gtest/gtest.h>
#include <renderengine/mock/RenderEngine.h>
@@ -31,6 +33,7 @@
using ::testing::InSequence;
using ::testing::Ref;
using ::testing::Return;
+using ::testing::ReturnRef;
using ::testing::SaveArg;
using ::testing::StrictMock;
@@ -117,6 +120,97 @@
}
/*
+ * CompositionEngine::updateCursorAsync
+ */
+
+struct CompositionEngineUpdateCursorAsyncTest : public CompositionEngineTest {
+public:
+ CompositionEngineUpdateCursorAsyncTest() {
+ EXPECT_CALL(*mOutput1, getOutputLayerCount()).WillRepeatedly(Return(0));
+ EXPECT_CALL(*mOutput1, getOutputLayerOrderedByZByIndex(_)).Times(0);
+
+ EXPECT_CALL(*mOutput2, getOutputLayerCount()).WillRepeatedly(Return(1));
+ EXPECT_CALL(*mOutput2, getOutputLayerOrderedByZByIndex(0))
+ .WillRepeatedly(Return(&mOutput2OutputLayer1));
+
+ EXPECT_CALL(*mOutput3, getOutputLayerCount()).WillRepeatedly(Return(2));
+ EXPECT_CALL(*mOutput3, getOutputLayerOrderedByZByIndex(0))
+ .WillRepeatedly(Return(&mOutput3OutputLayer1));
+ EXPECT_CALL(*mOutput3, getOutputLayerOrderedByZByIndex(1))
+ .WillRepeatedly(Return(&mOutput3OutputLayer2));
+
+ EXPECT_CALL(mOutput2OutputLayer1, getLayerFE()).WillRepeatedly(ReturnRef(mOutput2Layer1FE));
+ EXPECT_CALL(mOutput3OutputLayer1, getLayerFE()).WillRepeatedly(ReturnRef(mOutput3Layer1FE));
+ EXPECT_CALL(mOutput3OutputLayer2, getLayerFE()).WillRepeatedly(ReturnRef(mOutput3Layer2FE));
+
+ EXPECT_CALL(mOutput2OutputLayer1, getLayer()).WillRepeatedly(ReturnRef(mOutput2Layer1));
+ EXPECT_CALL(mOutput3OutputLayer1, getLayer()).WillRepeatedly(ReturnRef(mOutput3Layer1));
+ EXPECT_CALL(mOutput3OutputLayer2, getLayer()).WillRepeatedly(ReturnRef(mOutput3Layer2));
+
+ EXPECT_CALL(mOutput2Layer1, editFEState()).WillRepeatedly(ReturnRef(mOutput2Layer1FEState));
+ EXPECT_CALL(mOutput3Layer1, editFEState()).WillRepeatedly(ReturnRef(mOutput3Layer1FEState));
+ EXPECT_CALL(mOutput3Layer2, editFEState()).WillRepeatedly(ReturnRef(mOutput3Layer2FEState));
+ }
+
+ StrictMock<mock::OutputLayer> mOutput2OutputLayer1;
+ StrictMock<mock::OutputLayer> mOutput3OutputLayer1;
+ StrictMock<mock::OutputLayer> mOutput3OutputLayer2;
+
+ StrictMock<mock::LayerFE> mOutput2Layer1FE;
+ StrictMock<mock::LayerFE> mOutput3Layer1FE;
+ StrictMock<mock::LayerFE> mOutput3Layer2FE;
+
+ StrictMock<mock::Layer> mOutput2Layer1;
+ StrictMock<mock::Layer> mOutput3Layer1;
+ StrictMock<mock::Layer> mOutput3Layer2;
+
+ LayerFECompositionState mOutput2Layer1FEState;
+ LayerFECompositionState mOutput3Layer1FEState;
+ LayerFECompositionState mOutput3Layer2FEState;
+};
+
+TEST_F(CompositionEngineUpdateCursorAsyncTest, handlesNoOutputs) {
+ mEngine.updateCursorAsync(mRefreshArgs);
+}
+
+TEST_F(CompositionEngineUpdateCursorAsyncTest, handlesNoLayersBeingCursorLayers) {
+ EXPECT_CALL(mOutput2OutputLayer1, isHardwareCursor()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mOutput3OutputLayer1, isHardwareCursor()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mOutput3OutputLayer2, isHardwareCursor()).WillRepeatedly(Return(false));
+
+ mRefreshArgs.outputs = {mOutput1, mOutput2, mOutput3};
+
+ mEngine.updateCursorAsync(mRefreshArgs);
+}
+
+TEST_F(CompositionEngineUpdateCursorAsyncTest, handlesMultipleLayersBeingCursorLayers) {
+ {
+ InSequence seq;
+ EXPECT_CALL(mOutput2OutputLayer1, isHardwareCursor()).WillRepeatedly(Return(true));
+ EXPECT_CALL(mOutput2Layer1FE, latchCursorCompositionState(Ref(mOutput2Layer1FEState)));
+ EXPECT_CALL(mOutput2OutputLayer1, writeCursorPositionToHWC());
+ }
+
+ {
+ InSequence seq;
+ EXPECT_CALL(mOutput3OutputLayer1, isHardwareCursor()).WillRepeatedly(Return(true));
+ EXPECT_CALL(mOutput3Layer1FE, latchCursorCompositionState(Ref(mOutput3Layer1FEState)));
+ EXPECT_CALL(mOutput3OutputLayer1, writeCursorPositionToHWC());
+ }
+
+ {
+ InSequence seq;
+ EXPECT_CALL(mOutput3OutputLayer2, isHardwareCursor()).WillRepeatedly(Return(true));
+ EXPECT_CALL(mOutput3Layer2FE, latchCursorCompositionState(Ref(mOutput3Layer2FEState)));
+ EXPECT_CALL(mOutput3OutputLayer2, writeCursorPositionToHWC());
+ }
+
+ mRefreshArgs.outputs = {mOutput1, mOutput2, mOutput3};
+
+ mEngine.updateCursorAsync(mRefreshArgs);
+}
+
+/*
* CompositionEngine::preComposition
*/
diff --git a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
index c07dfbb..21b9aa9 100644
--- a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
@@ -19,35 +19,6 @@
#include <compositionengine/mock/CompositionEngine.h>
#include <gtest/gtest.h>
-namespace android::hardware::graphics::common::V1_1 {
-
-// Note: These operator overloads need to be defined in the same namespace as
-// the values they print.
-
-std::ostream& operator<<(std::ostream& os, const RenderIntent& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-} // namespace android::hardware::graphics::common::V1_1
-
-namespace android::hardware::graphics::common::V1_2 {
-
-// Note: These operator overloads need to be defined in the same namespace as
-// the values they print.
-
-std::ostream& operator<<(std::ostream& os, const Dataspace& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-std::ostream& operator<<(std::ostream& os, const ColorMode& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-} // namespace android::hardware::graphics::common::V1_2
-
namespace android::compositionengine {
namespace {
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index 7fce520..02bb146 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -16,6 +16,7 @@
#include <cmath>
+#include <android-base/stringprintf.h>
#include <compositionengine/LayerFECompositionState.h>
#include <compositionengine/impl/Output.h>
#include <compositionengine/impl/OutputCompositionState.h>
@@ -31,6 +32,7 @@
#include <ui/Rect.h>
#include <ui/Region.h>
+#include "CallOrderStateMachineHelper.h"
#include "RegionMatcher.h"
#include "TransformMatcher.h"
@@ -38,8 +40,14 @@
namespace {
using testing::_;
+using testing::ByMove;
+using testing::DoAll;
+using testing::InSequence;
+using testing::Mock;
+using testing::Ref;
using testing::Return;
using testing::ReturnRef;
+using testing::SetArgPointee;
using testing::StrictMock;
constexpr auto TR_IDENT = 0u;
@@ -49,6 +57,34 @@
const mat4 kNonIdentityHalf = mat4() * 0.5;
const mat4 kNonIdentityQuarter = mat4() * 0.25;
+constexpr OutputColorSetting kVendorSpecifiedOutputColorSetting =
+ static_cast<OutputColorSetting>(0x100);
+
+struct OutputPartialMockBase : public impl::Output {
+ // compositionengine::Output overrides
+ const OutputCompositionState& getState() const override { return mState; }
+ OutputCompositionState& editState() override { return mState; }
+
+ // Use mocks for all the remaining virtual functions
+ // not implemented by the base implementation class.
+ MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
+ MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex, compositionengine::OutputLayer*(size_t));
+ MOCK_METHOD3(ensureOutputLayer,
+ compositionengine::OutputLayer*(std::optional<size_t>,
+ const std::shared_ptr<compositionengine::Layer>&,
+ const sp<LayerFE>&));
+ MOCK_METHOD0(finalizePendingOutputLayers, void());
+ MOCK_METHOD0(clearOutputLayers, void());
+ MOCK_CONST_METHOD1(dumpState, void(std::string&));
+ MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
+ MOCK_METHOD2(injectOutputLayerForTest,
+ compositionengine::OutputLayer*(const std::shared_ptr<compositionengine::Layer>&,
+ const sp<LayerFE>&));
+ MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
+
+ impl::OutputCompositionState mState;
+};
+
struct OutputTest : public testing::Test {
class Output : public impl::Output {
public:
@@ -77,8 +113,70 @@
std::shared_ptr<Output> mOutput = createOutput(mCompositionEngine);
};
+// Extension of the base test useful for checking interactions with the LayerFE
+// functions to latch composition state.
+struct OutputLatchFEStateTest : public OutputTest {
+ OutputLatchFEStateTest() {
+ EXPECT_CALL(*mOutputLayer1, getLayer()).WillRepeatedly(ReturnRef(mLayer1));
+ EXPECT_CALL(*mOutputLayer2, getLayer()).WillRepeatedly(ReturnRef(mLayer2));
+ EXPECT_CALL(*mOutputLayer3, getLayer()).WillRepeatedly(ReturnRef(mLayer3));
+
+ EXPECT_CALL(*mOutputLayer1, getLayerFE()).WillRepeatedly(ReturnRef(mLayer1FE));
+ EXPECT_CALL(*mOutputLayer2, getLayerFE()).WillRepeatedly(ReturnRef(mLayer2FE));
+ EXPECT_CALL(*mOutputLayer3, getLayerFE()).WillRepeatedly(ReturnRef(mLayer3FE));
+
+ EXPECT_CALL(mLayer1, editFEState()).WillRepeatedly(ReturnRef(mLayer1FEState));
+ EXPECT_CALL(mLayer2, editFEState()).WillRepeatedly(ReturnRef(mLayer2FEState));
+ EXPECT_CALL(mLayer3, editFEState()).WillRepeatedly(ReturnRef(mLayer3FEState));
+ }
+
+ void injectLayer(std::unique_ptr<mock::OutputLayer> layer) {
+ mOutput->injectOutputLayerForTest(std::unique_ptr<OutputLayer>(layer.release()));
+ }
+
+ std::unique_ptr<mock::OutputLayer> mOutputLayer1{new StrictMock<mock::OutputLayer>};
+ std::unique_ptr<mock::OutputLayer> mOutputLayer2{new StrictMock<mock::OutputLayer>};
+ std::unique_ptr<mock::OutputLayer> mOutputLayer3{new StrictMock<mock::OutputLayer>};
+
+ StrictMock<mock::Layer> mLayer1;
+ StrictMock<mock::Layer> mLayer2;
+ StrictMock<mock::Layer> mLayer3;
+
+ StrictMock<mock::LayerFE> mLayer1FE;
+ StrictMock<mock::LayerFE> mLayer2FE;
+ StrictMock<mock::LayerFE> mLayer3FE;
+
+ LayerFECompositionState mLayer1FEState;
+ LayerFECompositionState mLayer2FEState;
+ LayerFECompositionState mLayer3FEState;
+};
+
const Rect OutputTest::kDefaultDisplaySize{100, 200};
+using ColorProfile = compositionengine::Output::ColorProfile;
+
+void dumpColorProfile(ColorProfile profile, std::string& result, const char* name) {
+ android::base::StringAppendF(&result, "%s (%s[%d] %s[%d] %s[%d] %s[%d]) ", name,
+ toString(profile.mode).c_str(), profile.mode,
+ toString(profile.dataspace).c_str(), profile.dataspace,
+ toString(profile.renderIntent).c_str(), profile.renderIntent,
+ toString(profile.colorSpaceAgnosticDataspace).c_str(),
+ profile.colorSpaceAgnosticDataspace);
+}
+
+// Checks for a ColorProfile match
+MATCHER_P(ColorProfileEq, expected, "") {
+ std::string buf;
+ buf.append("ColorProfiles are not equal\n");
+ dumpColorProfile(expected, buf, "expected value");
+ dumpColorProfile(arg, buf, "actual value");
+ *result_listener << buf;
+
+ return (expected.mode == arg.mode) && (expected.dataspace == arg.dataspace) &&
+ (expected.renderIntent == arg.renderIntent) &&
+ (expected.colorSpaceAgnosticDataspace == arg.colorSpaceAgnosticDataspace);
+}
+
/*
* Basic construction
*/
@@ -268,10 +366,12 @@
}
/*
- * Output::setColorMode
+ * Output::setColorProfile
*/
-TEST_F(OutputTest, setColorModeSetsStateAndDirtiesOutputIfChanged) {
+using OutputSetColorProfileTest = OutputTest;
+
+TEST_F(OutputSetColorProfileTest, setsStateAndDirtiesOutputIfChanged) {
using ColorProfile = Output::ColorProfile;
EXPECT_CALL(*mDisplayColorProfile,
@@ -292,7 +392,7 @@
EXPECT_THAT(mOutput->getState().dirtyRegion, RegionEq(Region(kDefaultDisplaySize)));
}
-TEST_F(OutputTest, setColorModeDoesNothingIfNoChange) {
+TEST_F(OutputSetColorProfileTest, doesNothingIfNoChange) {
using ColorProfile = Output::ColorProfile;
EXPECT_CALL(*mDisplayColorProfile,
@@ -483,6 +583,84 @@
}
/*
+ * Output::setReleasedLayers()
+ */
+
+using OutputSetReleasedLayersTest = OutputTest;
+
+TEST_F(OutputSetReleasedLayersTest, setReleasedLayersTakesGivenLayers) {
+ sp<StrictMock<mock::LayerFE>> layer1FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer2FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer3FE{new StrictMock<mock::LayerFE>()};
+
+ Output::ReleasedLayers layers;
+ layers.push_back(layer1FE);
+ layers.push_back(layer2FE);
+ layers.push_back(layer3FE);
+
+ mOutput->setReleasedLayers(std::move(layers));
+
+ const auto& setLayers = mOutput->getReleasedLayersForTest();
+ ASSERT_EQ(3u, setLayers.size());
+ ASSERT_EQ(layer1FE.get(), setLayers[0].promote().get());
+ ASSERT_EQ(layer2FE.get(), setLayers[1].promote().get());
+ ASSERT_EQ(layer3FE.get(), setLayers[2].promote().get());
+}
+
+/*
+ * Output::updateLayerStateFromFE()
+ */
+
+using OutputUpdateLayerStateFromFETest = OutputLatchFEStateTest;
+
+TEST_F(OutputUpdateLayerStateFromFETest, handlesNoOutputLayerCase) {
+ CompositionRefreshArgs refreshArgs;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+TEST_F(OutputUpdateLayerStateFromFETest, latchesContentStateForAllContainedLayers) {
+ EXPECT_CALL(mLayer1FE,
+ latchCompositionState(Ref(mLayer1FEState), LayerFE::StateSubset::Content));
+ EXPECT_CALL(mLayer2FE,
+ latchCompositionState(Ref(mLayer2FEState), LayerFE::StateSubset::Content));
+ EXPECT_CALL(mLayer3FE,
+ latchCompositionState(Ref(mLayer3FEState), LayerFE::StateSubset::Content));
+
+ // Note: Must be performed after any expectations on these mocks
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ CompositionRefreshArgs refreshArgs;
+ refreshArgs.updatingGeometryThisFrame = false;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+TEST_F(OutputUpdateLayerStateFromFETest, latchesGeometryAndContentStateForAllContainedLayers) {
+ EXPECT_CALL(mLayer1FE,
+ latchCompositionState(Ref(mLayer1FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+ EXPECT_CALL(mLayer2FE,
+ latchCompositionState(Ref(mLayer2FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+ EXPECT_CALL(mLayer3FE,
+ latchCompositionState(Ref(mLayer3FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+
+ // Note: Must be performed after any expectations on these mocks
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ CompositionRefreshArgs refreshArgs;
+ refreshArgs.updatingGeometryThisFrame = true;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+/*
* Output::updateAndWriteCompositionState()
*/
@@ -512,33 +690,10 @@
*/
struct OutputPrepareFrameTest : public testing::Test {
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// Sets up the helper functions called by prepareFrame to use a mock
// implementations.
MOCK_METHOD0(chooseCompositionStrategy, void());
-
- // compositionengine::Output overrides
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
OutputPrepareFrameTest() {
@@ -586,6 +741,918 @@
}
/*
+ * Output::present()
+ */
+
+struct OutputPresentTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+        // All child helper functions called by Output::present() are defined
+        // as mocks, and those are tested separately, allowing the present()
+        // test to just cover the high-level flow.
+ MOCK_METHOD1(updateColorProfile, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(updateAndWriteCompositionState,
+ void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(setColorTransform, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD0(beginFrame, void());
+ MOCK_METHOD0(prepareFrame, void());
+ MOCK_METHOD1(devOptRepaintFlash, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(finishFrame, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD0(postFramebuffer, void());
+ };
+
+ StrictMock<OutputPartialMock> mOutput;
+};
+
+TEST_F(OutputPresentTest, justInvokesChildFunctionsInSequence) {
+ CompositionRefreshArgs args;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, updateColorProfile(Ref(args)));
+ EXPECT_CALL(mOutput, updateAndWriteCompositionState(Ref(args)));
+ EXPECT_CALL(mOutput, setColorTransform(Ref(args)));
+ EXPECT_CALL(mOutput, beginFrame());
+ EXPECT_CALL(mOutput, prepareFrame());
+ EXPECT_CALL(mOutput, devOptRepaintFlash(Ref(args)));
+ EXPECT_CALL(mOutput, finishFrame(Ref(args)));
+ EXPECT_CALL(mOutput, postFramebuffer());
+
+ mOutput.present(args);
+}
+
+/*
+ * Output::updateColorProfile()
+ */
+
+struct OutputUpdateColorProfileTest : public testing::Test {
+ using TestType = OutputUpdateColorProfileTest;
+
+ struct OutputPartialMock : public OutputPartialMockBase {
+        // Sets up the setColorProfile() helper function called by
+        // updateColorProfile() as a mock, so the tests can verify the color
+        // profile values that end up being chosen.
+ MOCK_METHOD1(setColorProfile, void(const ColorProfile&));
+ };
+
+ struct Layer {
+ Layer() {
+ EXPECT_CALL(mOutputLayer, getLayer()).WillRepeatedly(ReturnRef(mLayer));
+ EXPECT_CALL(mOutputLayer, getLayerFE()).WillRepeatedly(ReturnRef(mLayerFE));
+ EXPECT_CALL(mLayer, getFEState()).WillRepeatedly(ReturnRef(mLayerFEState));
+ }
+
+ StrictMock<mock::OutputLayer> mOutputLayer;
+ StrictMock<mock::Layer> mLayer;
+ StrictMock<mock::LayerFE> mLayerFE;
+ LayerFECompositionState mLayerFEState;
+ };
+
+ OutputUpdateColorProfileTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0))
+ .WillRepeatedly(Return(&mLayer1.mOutputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(1))
+ .WillRepeatedly(Return(&mLayer2.mOutputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(2))
+ .WillRepeatedly(Return(&mLayer3.mOutputLayer));
+ }
+
+ struct ExecuteState : public CallOrderStateMachineHelper<TestType, ExecuteState> {
+ void execute() { getInstance()->mOutput.updateColorProfile(getInstance()->mRefreshArgs); }
+ };
+
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ StrictMock<OutputPartialMock> mOutput;
+
+ Layer mLayer1;
+ Layer mLayer2;
+ Layer mLayer3;
+
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+// TODO(b/144522012): Refactor Output::updateColorProfile and the related code
+// to make it easier to write unit tests.
+
+TEST_F(OutputUpdateColorProfileTest, setsAColorProfileWhenUnmanaged) {
+ // When the outputColorSetting is set to kUnmanaged, the implementation sets
+ // a simple default color profile without looking at anything else.
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(3));
+ EXPECT_CALL(mOutput,
+ setColorProfile(ColorProfileEq(
+ ColorProfile{ui::ColorMode::NATIVE, ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC, ui::Dataspace::UNKNOWN})));
+
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kUnmanaged;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ mOutput.updateColorProfile(mRefreshArgs);
+}
+
+struct OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile
+ : public OutputUpdateColorProfileTest {
+ OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile() {
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(0));
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ }
+
+ struct ExpectBestColorModeCallResultUsedToSetColorProfileState
+ : public CallOrderStateMachineHelper<
+ TestType, ExpectBestColorModeCallResultUsedToSetColorProfileState> {
+ [[nodiscard]] auto expectBestColorModeCallResultUsedToSetColorProfile(
+ ui::ColorMode colorMode, ui::Dataspace dataspace, ui::RenderIntent renderIntent) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(ui::Dataspace::V0_SRGB, ui::RenderIntent::ENHANCE, _, _,
+ _))
+ .WillOnce(DoAll(SetArgPointee<2>(dataspace), SetArgPointee<3>(colorMode),
+ SetArgPointee<4>(renderIntent)));
+ EXPECT_CALL(getInstance()->mOutput,
+ setColorProfile(
+ ColorProfileEq(ColorProfile{colorMode, dataspace, renderIntent,
+ ui::Dataspace::UNKNOWN})));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() {
+ return ExpectBestColorModeCallResultUsedToSetColorProfileState::make(this);
+ }
+};
+
+TEST_F(OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile,
+ Native_Unknown_Colorimetric_Set) {
+ verify().expectBestColorModeCallResultUsedToSetColorProfile(ui::ColorMode::NATIVE,
+ ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile,
+ DisplayP3_DisplayP3_Enhance_Set) {
+ verify().expectBestColorModeCallResultUsedToSetColorProfile(ui::ColorMode::DISPLAY_P3,
+ ui::Dataspace::DISPLAY_P3,
+ ui::RenderIntent::ENHANCE)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_ColorSpaceAgnosticDataspaceAffectsSetColorProfile
+ : public OutputUpdateColorProfileTest {
+    OutputUpdateColorProfileTest_ColorSpaceAgnosticDataspaceAffectsSetColorProfile() {
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(0));
+ EXPECT_CALL(*mDisplayColorProfile,
+ getBestColorMode(ui::Dataspace::V0_SRGB, ui::RenderIntent::ENHANCE, _, _, _))
+ .WillRepeatedly(DoAll(SetArgPointee<2>(ui::Dataspace::UNKNOWN),
+ SetArgPointee<3>(ui::ColorMode::NATIVE),
+ SetArgPointee<4>(ui::RenderIntent::COLORIMETRIC)));
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ }
+
+ struct IfColorSpaceAgnosticDataspaceSetToState
+ : public CallOrderStateMachineHelper<TestType, IfColorSpaceAgnosticDataspaceSetToState> {
+ [[nodiscard]] auto ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace dataspace) {
+ getInstance()->mRefreshArgs.colorSpaceAgnosticDataspace = dataspace;
+ return nextState<ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState>();
+ }
+ };
+
+ struct ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState
+ : public CallOrderStateMachineHelper<
+ TestType, ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState> {
+ [[nodiscard]] auto thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(
+ ui::Dataspace dataspace) {
+ EXPECT_CALL(getInstance()->mOutput,
+ setColorProfile(ColorProfileEq(
+ ColorProfile{ui::ColorMode::NATIVE, ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC, dataspace})));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfColorSpaceAgnosticDataspaceSetToState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_ColorSpaceAgnosticDataspaceAffectsSetColorProfile, DisplayP3) {
+ verify().ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace::DISPLAY_P3)
+ .thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ColorSpaceAgnosticDataspaceAffectsSetColorProfile, V0_SRGB) {
+ verify().ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace::V0_SRGB)
+ .thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference
+ : public OutputUpdateColorProfileTest {
+ // Internally the implementation looks through the dataspaces of all the
+ // visible layers. The topmost one that also has an actual dataspace
+ // preference set is used to drive subsequent choices.
+
+ OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(3));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ struct IfTopLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, IfTopLayerDataspaceState> {
+ [[nodiscard]] auto ifTopLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer3.mLayerFEState.dataspace = dataspace;
+ return nextState<AndIfMiddleLayerDataspaceState>();
+ }
+ [[nodiscard]] auto ifTopLayerHasNoPreference() {
+ return ifTopLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct AndIfMiddleLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfMiddleLayerDataspaceState> {
+ [[nodiscard]] auto andIfMiddleLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer2.mLayerFEState.dataspace = dataspace;
+ return nextState<AndIfBottomLayerDataspaceState>();
+ }
+ [[nodiscard]] auto andIfMiddleLayerHasNoPreference() {
+ return andIfMiddleLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct AndIfBottomLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfBottomLayerDataspaceState> {
+ [[nodiscard]] auto andIfBottomLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ [[nodiscard]] auto andIfBottomLayerHasNoPreference() {
+ return andIfBottomLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfTopLayerDataspaceState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+       noStrongLayerPreferenceUses_V0_SRGB) {
+ // If none of the layers indicate a preference, then V0_SRGB is the
+ // preferred choice (subject to additional checks).
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifTopmostUses_DisplayP3_Then_DisplayP3_Chosen) {
+ // If only the topmost layer has a preference, then that is what is chosen.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifMiddleUses_DisplayP3_Then_DisplayP3_Chosen) {
+    // If only the middle layer has a preference, then that is what is chosen.
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifBottomUses_DisplayP3_Then_DisplayP3_Chosen) {
+    // If only the bottom layer has a preference, then that is what is chosen.
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifTopUses_DisplayBT2020_AndBottomUses_DisplayP3_Then_DisplayBT2020_Chosen) {
+ // If multiple layers have a preference, the topmost value is what is used.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_BT2020)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_BT2020)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+       ifTopUses_DisplayP3_AndBottomUses_DisplayBT2020_Then_DisplayP3_Chosen) {
+ // If multiple layers have a preference, the topmost value is what is used.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_BT2020)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_ForceOutputColorOverrides
+ : public OutputUpdateColorProfileTest {
+ // If CompositionRefreshArgs::forceOutputColorMode is set to some specific
+ // values, it overrides the layer dataspace choice.
+
+ OutputUpdateColorProfileTest_ForceOutputColorOverrides() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ mLayer1.mLayerFEState.dataspace = ui::Dataspace::DISPLAY_BT2020;
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ struct IfForceOutputColorModeState
+ : public CallOrderStateMachineHelper<TestType, IfForceOutputColorModeState> {
+ [[nodiscard]] auto ifForceOutputColorMode(ui::ColorMode colorMode) {
+ getInstance()->mRefreshArgs.forceOutputColorMode = colorMode;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ [[nodiscard]] auto ifNoOverride() { return ifForceOutputColorMode(ui::ColorMode::NATIVE); }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfForceOutputColorModeState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, NoOverride_DoesNotOverride) {
+ // By default the layer state is used to set the preferred dataspace
+ verify().ifNoOverride()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_BT2020)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, SRGB_Override_Uses_V0_SRGB) {
+ // Setting ui::ColorMode::SRGB overrides it with ui::Dataspace::V0_SRGB
+ verify().ifForceOutputColorMode(ui::ColorMode::SRGB)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, DisplayP3_Override_Uses_DisplayP3) {
+ // Setting ui::ColorMode::DISPLAY_P3 overrides it with ui::Dataspace::DISPLAY_P3
+ verify().ifForceOutputColorMode(ui::ColorMode::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+// HDR output requires all layers to be compatible with the chosen HDR
+// dataspace, and requires the display to have non-legacy support for it.
+struct OutputUpdateColorProfileTest_Hdr : public OutputUpdateColorProfileTest {
+ OutputUpdateColorProfileTest_Hdr() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ static constexpr ui::Dataspace kNonHdrDataspace = ui::Dataspace::DISPLAY_P3;
+ static constexpr ui::Dataspace BT2020_PQ = ui::Dataspace::BT2020_PQ;
+ static constexpr ui::Dataspace BT2020_HLG = ui::Dataspace::BT2020_HLG;
+ static constexpr ui::Dataspace DISPLAY_P3 = ui::Dataspace::DISPLAY_P3;
+
+ struct IfTopLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, IfTopLayerDataspaceState> {
+ [[nodiscard]] auto ifTopLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer2.mLayerFEState.dataspace = dataspace;
+ return nextState<AndTopLayerCompositionTypeState>();
+ }
+ [[nodiscard]] auto ifTopLayerIsNotHdr() { return ifTopLayerIs(kNonHdrDataspace); }
+ };
+
+ struct AndTopLayerCompositionTypeState
+ : public CallOrderStateMachineHelper<TestType, AndTopLayerCompositionTypeState> {
+ [[nodiscard]] auto andTopLayerIsREComposed(bool renderEngineComposed) {
+ getInstance()->mLayer2.mLayerFEState.forceClientComposition = renderEngineComposed;
+ return nextState<AndIfBottomLayerDataspaceState>();
+ }
+ };
+
+ struct AndIfBottomLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfBottomLayerDataspaceState> {
+ [[nodiscard]] auto andIfBottomLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<AndBottomLayerCompositionTypeState>();
+ }
+ [[nodiscard]] auto andIfBottomLayerIsNotHdr() {
+ return andIfBottomLayerIs(kNonHdrDataspace);
+ }
+ };
+
+ struct AndBottomLayerCompositionTypeState
+ : public CallOrderStateMachineHelper<TestType, AndBottomLayerCompositionTypeState> {
+ [[nodiscard]] auto andBottomLayerIsREComposed(bool renderEngineComposed) {
+ getInstance()->mLayer1.mLayerFEState.forceClientComposition = renderEngineComposed;
+ return nextState<AndIfHasLegacySupportState>();
+ }
+ };
+
+ struct AndIfHasLegacySupportState
+ : public CallOrderStateMachineHelper<TestType, AndIfHasLegacySupportState> {
+ [[nodiscard]] auto andIfLegacySupportFor(ui::Dataspace dataspace, bool legacySupport) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile, hasLegacyHdrSupport(dataspace))
+ .WillOnce(Return(legacySupport));
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfTopLayerDataspaceState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_HW_Uses_PQ) {
+ // If all layers use BT2020_PQ, and there are no other special conditions,
+ // BT2020_PQ is used.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_RE_Uses_PQ) {
+ // BT2020_PQ is still used if the bottom layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_RE_On_PQ_HW_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the top layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_HW_Uses_PQ) {
+ // If there is mixed HLG/PQ use, and the topmost layer is PQ, then PQ is used if there
+ // are no other special conditions.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_RE_Uses_PQ) {
+ // BT2020_PQ is used if the bottom HLG layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_RE_On_HLG_HW_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the top PQ layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_HW_Uses_PQ) {
+ // If there is mixed HLG/PQ use, and the topmost layer is HLG, then PQ is
+ // used if there are no other special conditions.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_RE_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the bottom PQ layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_RE_On_PQ_HW_Uses_PQ) {
+ // BT2020_PQ is still used if the top HLG layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_HW_Uses_HLG) {
+ // If all layers use HLG then HLG is used if there are no other special
+ // conditions.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_HW_IfHLGHasLegacySupport_Uses_DisplayP3) {
+    // BT2020_HLG is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_RE_Uses_HLG) {
+ // BT2020_HLG is used even if the bottom layer is client composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_RE_On_HLG_HW_Uses_HLG) {
+ // BT2020_HLG is used even if the top layer is client composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_NonHdr_HW_Uses_PQ) {
+ // Even if there are non-HDR layers present, BT2020_PQ can still be used.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIsNotHdr()
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_NonHdr_RE_Uses_HLG) {
+    // BT2020_HLG is still used even if the bottom non-HDR layer is
+    // RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIsNotHdr()
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+struct OutputUpdateColorProfile_AffectsChosenRenderIntentTest
+ : public OutputUpdateColorProfileTest {
+ // The various values for CompositionRefreshArgs::outputColorSetting affect
+ // the chosen renderIntent, along with whether the preferred dataspace is an
+ // HDR dataspace or not.
+
+ OutputUpdateColorProfile_AffectsChosenRenderIntentTest() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ mLayer1.mLayerFEState.dataspace = ui::Dataspace::BT2020_PQ;
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ EXPECT_CALL(*mDisplayColorProfile, hasLegacyHdrSupport(ui::Dataspace::BT2020_PQ))
+ .WillRepeatedly(Return(false));
+ }
+
+ // The tests here involve enough state and GMock setup that using a mini-DSL
+ // makes the tests much more readable, and allows the test to focus more on
+ // the intent than on some of the details.
+
+ static constexpr ui::Dataspace kNonHdrDataspace = ui::Dataspace::DISPLAY_P3;
+ static constexpr ui::Dataspace kHdrDataspace = ui::Dataspace::BT2020_PQ;
+
+ struct IfDataspaceChosenState
+ : public CallOrderStateMachineHelper<TestType, IfDataspaceChosenState> {
+ [[nodiscard]] auto ifDataspaceChosenIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<AndOutputColorSettingState>();
+ }
+ [[nodiscard]] auto ifDataspaceChosenIsNonHdr() {
+ return ifDataspaceChosenIs(kNonHdrDataspace);
+ }
+ [[nodiscard]] auto ifDataspaceChosenIsHdr() { return ifDataspaceChosenIs(kHdrDataspace); }
+ };
+
+ struct AndOutputColorSettingState
+ : public CallOrderStateMachineHelper<TestType, AndOutputColorSettingState> {
+ [[nodiscard]] auto andOutputColorSettingIs(OutputColorSetting setting) {
+ getInstance()->mRefreshArgs.outputColorSetting = setting;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::RenderIntent intent) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(getInstance()->mLayer1.mLayerFEState.dataspace, intent, _,
+ _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+    // Call this member function to start using the mini-DSL defined above,
+    // just as in the other fixtures.
+ [[nodiscard]] auto verify() { return IfDataspaceChosenState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Managed_NonHdr_Prefers_Colorimetric) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kManaged)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Managed_Hdr_Prefers_ToneMapColorimetric) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kManaged)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::TONE_MAP_COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Enhanced_NonHdr_Prefers_Enhance) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kEnhanced)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::ENHANCE)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Enhanced_Hdr_Prefers_ToneMapEnhance) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kEnhanced)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::TONE_MAP_ENHANCE)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Vendor_NonHdr_Prefers_Vendor) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(kVendorSpecifiedOutputColorSetting)
+ .thenExpectBestColorModeCallUses(
+ static_cast<ui::RenderIntent>(kVendorSpecifiedOutputColorSetting))
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Vendor_Hdr_Prefers_Vendor) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(kVendorSpecifiedOutputColorSetting)
+ .thenExpectBestColorModeCallUses(
+ static_cast<ui::RenderIntent>(kVendorSpecifiedOutputColorSetting))
+ .execute();
+}
+
+/*
+ * Output::beginFrame()
+ */
+
+/*
+ * Output::devOptRepaintFlash()
+ */
+
+struct OutputDevOptRepaintFlashTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+        // Sets up the helper functions called by devOptRepaintFlash to use
+        // mock implementations.
+ MOCK_CONST_METHOD1(getDirtyRegion, Region(bool));
+ MOCK_METHOD1(composeSurfaces, std::optional<base::unique_fd>(const Region&));
+ MOCK_METHOD0(postFramebuffer, void());
+ MOCK_METHOD0(prepareFrame, void());
+ };
+
+ OutputDevOptRepaintFlashTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+ }
+
+ static const Region kEmptyRegion;
+ static const Region kNotEmptyRegion;
+
+ StrictMock<OutputPartialMock> mOutput;
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+const Region OutputDevOptRepaintFlashTest::kEmptyRegion{Rect{0, 0, 0, 0}};
+const Region OutputDevOptRepaintFlashTest::kNotEmptyRegion{Rect{0, 0, 1, 1}};
+
+TEST_F(OutputDevOptRepaintFlashTest, doesNothingIfFlashDelayNotSet) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = {};
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = true;
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, postsAndPreparesANewFrameIfNotEnabled) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = false;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, postsAndPreparesANewFrameIfNotDirty) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, getDirtyRegion(true)).WillOnce(Return(kEmptyRegion));
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, alsoComposesSurfacesAndQueuesABufferIfDirty) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = false;
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, getDirtyRegion(false)).WillOnce(Return(kNotEmptyRegion));
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(kNotEmptyRegion)));
+ EXPECT_CALL(*mRenderSurface, queueBuffer(_));
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+// TODO(b/144060211) - Add coverage
+
+/*
+ * Output::finishFrame()
+ */
+
+struct OutputFinishFrameTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+        // Sets up the helper functions called by finishFrame to use mock
+        // implementations.
+ MOCK_METHOD1(composeSurfaces, std::optional<base::unique_fd>(const Region&));
+ MOCK_METHOD0(postFramebuffer, void());
+ };
+
+ OutputFinishFrameTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+ }
+
+ StrictMock<OutputPartialMock> mOutput;
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+TEST_F(OutputFinishFrameTest, ifNotEnabledDoesNothing) {
+ mOutput.mState.isEnabled = false;
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+TEST_F(OutputFinishFrameTest, takesEarlyOutIfComposeSurfacesReturnsNoFence) {
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(Region::INVALID_REGION)));
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+TEST_F(OutputFinishFrameTest, queuesBufferIfComposeSurfacesReturnsAFence) {
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(Region::INVALID_REGION)))
+ .WillOnce(Return(ByMove(base::unique_fd())));
+ EXPECT_CALL(*mRenderSurface, queueBuffer(_));
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+/*
+ * Output::postFramebuffer()
+ */
+
+// TODO(b/144060211) - Add coverage
+
+/*
* Output::composeSurfaces()
*/
@@ -598,7 +1665,7 @@
static const Rect kDefaultOutputScissor;
static const mat4 kDefaultColorTransformMat;
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// Sets up the helper functions called by composeSurfaces to use a mock
// implementations.
MOCK_CONST_METHOD0(getSkipColorTransform, bool());
@@ -607,29 +1674,6 @@
MOCK_METHOD2(appendRegionFlashRequests,
void(const Region&, std::vector<renderengine::LayerSettings>&));
MOCK_METHOD1(setExpensiveRenderingExpected, void(bool));
-
- // compositionengine::Output overrides
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
OutputComposeSurfacesTest() {
@@ -718,36 +1762,13 @@
*/
struct GenerateClientCompositionRequestsTest : public testing::Test {
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// compositionengine::Output overrides
-
std::vector<renderengine::LayerSettings> generateClientCompositionRequests(
bool supportsProtectedContent, Region& clearRegion) override {
return impl::Output::generateClientCompositionRequests(supportsProtectedContent,
clearRegion);
}
-
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
GenerateClientCompositionRequestsTest() {
@@ -805,6 +1826,7 @@
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
EXPECT_CALL(leftLayerFE, prepareClientComposition(_)).WillOnce(Return(leftLayerRESettings));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -813,6 +1835,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
@@ -865,6 +1888,7 @@
EXPECT_CALL(outputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(layer, getFEState()).WillRepeatedly(ReturnRef(layerFEState));
EXPECT_CALL(layerFE, prepareClientComposition(_)).Times(0);
+ EXPECT_CALL(outputLayer, editState()).WillRepeatedly(ReturnRef(outputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u)).WillRepeatedly(Return(&outputLayer));
@@ -930,6 +1954,7 @@
EXPECT_CALL(leftOutputLayer, requiresClientComposition()).WillRepeatedly(Return(false));
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -938,6 +1963,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
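As a quick reference for the ColorProfileEq matcher introduced earlier in the OutputTest.cpp changes, an expectation built with it has the shape sketched below. This is illustrative only: it assumes the ColorProfile alias and the mOutput partial-mock fixture from the tests above, and the particular field values simply mirror the NATIVE/UNKNOWN/COLORIMETRIC defaults used by the unmanaged-color test.

    // Expect setColorProfile() to be called with a profile whose mode,
    // dataspace, render intent, and color-space-agnostic dataspace all match.
    EXPECT_CALL(mOutput,
                setColorProfile(ColorProfileEq(
                        ColorProfile{ui::ColorMode::NATIVE, ui::Dataspace::UNKNOWN,
                                     ui::RenderIntent::COLORIMETRIC,
                                     ui::Dataspace::UNKNOWN})));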
diff --git a/services/surfaceflinger/FrameTracer/FrameTracer.cpp b/services/surfaceflinger/FrameTracer/FrameTracer.cpp
index 3a0408e..6f91843 100644
--- a/services/surfaceflinger/FrameTracer/FrameTracer.cpp
+++ b/services/surfaceflinger/FrameTracer/FrameTracer.cpp
@@ -44,52 +44,52 @@
FrameTracerDataSource::Register(dsd);
}
-void FrameTracer::traceNewLayer(int32_t layerID, const std::string& layerName) {
- FrameTracerDataSource::Trace([this, layerID, &layerName](FrameTracerDataSource::TraceContext) {
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+void FrameTracer::traceNewLayer(int32_t layerId, const std::string& layerName) {
+ FrameTracerDataSource::Trace([this, layerId, &layerName](FrameTracerDataSource::TraceContext) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- mTraceTracker[layerID].layerName = layerName;
+ mTraceTracker[layerId].layerName = layerName;
}
});
}
-void FrameTracer::traceTimestamp(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+void FrameTracer::traceTimestamp(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
nsecs_t timestamp, FrameEvent::BufferEventType type,
nsecs_t duration) {
- FrameTracerDataSource::Trace([this, layerID, bufferID, frameNumber, timestamp, type,
+ FrameTracerDataSource::Trace([this, layerId, bufferID, frameNumber, timestamp, type,
duration](FrameTracerDataSource::TraceContext ctx) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
return;
}
// Handle any pending fences for this buffer.
- tracePendingFencesLocked(ctx, layerID, bufferID);
+ tracePendingFencesLocked(ctx, layerId, bufferID);
// Complete current trace.
- traceLocked(ctx, layerID, bufferID, frameNumber, timestamp, type, duration);
+ traceLocked(ctx, layerId, bufferID, frameNumber, timestamp, type, duration);
});
}
-void FrameTracer::traceFence(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+void FrameTracer::traceFence(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& fence,
FrameEvent::BufferEventType type, nsecs_t startTime) {
- FrameTracerDataSource::Trace([this, layerID, bufferID, frameNumber, &fence, type,
+ FrameTracerDataSource::Trace([this, layerId, bufferID, frameNumber, &fence, type,
startTime](FrameTracerDataSource::TraceContext ctx) {
const nsecs_t signalTime = fence->getSignalTime();
if (signalTime != Fence::SIGNAL_TIME_INVALID) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
return;
}
// Handle any pending fences for this buffer.
- tracePendingFencesLocked(ctx, layerID, bufferID);
+ tracePendingFencesLocked(ctx, layerId, bufferID);
if (signalTime != Fence::SIGNAL_TIME_PENDING) {
- traceSpanLocked(ctx, layerID, bufferID, frameNumber, type, startTime, signalTime);
+ traceSpanLocked(ctx, layerId, bufferID, frameNumber, type, startTime, signalTime);
} else {
- mTraceTracker[layerID].pendingFences[bufferID].push_back(
+ mTraceTracker[layerId].pendingFences[bufferID].push_back(
{.frameNumber = frameNumber,
.type = type,
.fence = fence,
@@ -100,9 +100,9 @@
}
void FrameTracer::tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx,
- int32_t layerID, uint64_t bufferID) {
- if (mTraceTracker[layerID].pendingFences.count(bufferID)) {
- auto& pendingFences = mTraceTracker[layerID].pendingFences[bufferID];
+ int32_t layerId, uint64_t bufferID) {
+ if (mTraceTracker[layerId].pendingFences.count(bufferID)) {
+ auto& pendingFences = mTraceTracker[layerId].pendingFences[bufferID];
for (size_t i = 0; i < pendingFences.size(); ++i) {
auto& pendingFence = pendingFences[i];
@@ -116,7 +116,7 @@
if (signalTime != Fence::SIGNAL_TIME_INVALID &&
systemTime() - signalTime < kFenceSignallingDeadline) {
- traceSpanLocked(ctx, layerID, bufferID, pendingFence.frameNumber, pendingFence.type,
+ traceSpanLocked(ctx, layerId, bufferID, pendingFence.frameNumber, pendingFence.type,
pendingFence.startTime, signalTime);
}
@@ -126,7 +126,7 @@
}
}
-void FrameTracer::traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+void FrameTracer::traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
FrameEvent::BufferEventType type, nsecs_t duration) {
auto packet = ctx.NewTracePacket();
@@ -138,9 +138,9 @@
}
event->set_type(type);
- if (mTraceTracker.find(layerID) != mTraceTracker.end() &&
- !mTraceTracker[layerID].layerName.empty()) {
- const std::string& layerName = mTraceTracker[layerID].layerName;
+ if (mTraceTracker.find(layerId) != mTraceTracker.end() &&
+ !mTraceTracker[layerId].layerName.empty()) {
+ const std::string& layerName = mTraceTracker[layerId].layerName;
event->set_layer_name(layerName.c_str(), layerName.size());
}
@@ -149,7 +149,7 @@
}
}
-void FrameTracer::traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+void FrameTracer::traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber,
FrameEvent::BufferEventType type, nsecs_t startTime,
nsecs_t endTime) {
@@ -159,12 +159,12 @@
timestamp = startTime;
duration = endTime - startTime;
}
- traceLocked(ctx, layerID, bufferID, frameNumber, timestamp, type, duration);
+ traceLocked(ctx, layerId, bufferID, frameNumber, timestamp, type, duration);
}
-void FrameTracer::onDestroy(int32_t layerID) {
+void FrameTracer::onDestroy(int32_t layerId) {
std::lock_guard<std::mutex> traceLock(mTraceMutex);
- mTraceTracker.erase(layerID);
+ mTraceTracker.erase(layerId);
}
std::string FrameTracer::miniDump() {
diff --git a/services/surfaceflinger/FrameTracer/FrameTracer.h b/services/surfaceflinger/FrameTracer/FrameTracer.h
index e91a750..ef5df90 100644
--- a/services/surfaceflinger/FrameTracer/FrameTracer.h
+++ b/services/surfaceflinger/FrameTracer/FrameTracer.h
@@ -47,21 +47,21 @@
void registerDataSource();
// Starts tracking a new layer for tracing. Needs to be called once before traceTimestamp() or
// traceFence() for each layer.
- void traceNewLayer(int32_t layerID, const std::string& layerName);
+ void traceNewLayer(int32_t layerId, const std::string& layerName);
// Creates a trace point at the timestamp provided.
- void traceTimestamp(int32_t layerID, uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
+ void traceTimestamp(int32_t layerId, uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
FrameEvent::BufferEventType type, nsecs_t duration = 0);
// Creates a trace point after the provided fence has been signalled. If a startTime is provided,
// the trace will be timestamped from startTime until the fence signalling time. If no
// startTime is provided, a durationless trace point will be created, timestamped at the fence
// signalling time. If the fence hasn't signalled yet, the trace point will be created the next
// time a trace call for this buffer occurs after the fence signals.
- void traceFence(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+ void traceFence(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& fence, FrameEvent::BufferEventType type,
nsecs_t startTime = 0);
// Takes care of cleanup when a layer is destroyed.
- void onDestroy(int32_t layerID);
+ void onDestroy(int32_t layerId);
std::string miniDump();
@@ -88,15 +88,15 @@
// Checks if any pending fences for a layer and buffer have signalled and, if they have, creates
// trace points for them.
- void tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+ void tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID);
// Creates a trace point by translating a start time and an end time to a timestamp and
// duration. If startTime is later than end time it sets end time as the timestamp and the
// duration to 0. Used by traceFence().
- void traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+ void traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber, FrameEvent::BufferEventType type,
nsecs_t startTime, nsecs_t endTime);
- void traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID, uint64_t bufferID,
+ void traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId, uint64_t bufferID,
uint64_t frameNumber, nsecs_t timestamp, FrameEvent::BufferEventType type,
nsecs_t duration = 0);
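For reference, the expected call order for this renamed API is traceNewLayer() once per layer, then traceTimestamp() and traceFence() per buffer event. The sketch below is illustrative only and not part of the patch: the wrapper function, the layer name, and the include paths are assumptions, and only event types that appear elsewhere in this change are used.

#include <memory>
#include <utils/Timers.h>
#include "FrameTracer/FrameTracer.h"

// Illustrative only: register the layer once, then emit a timestamp and a fence trace point.
void traceOneFrame(android::FrameTracer& frameTracer, int32_t layerId, uint64_t bufferId,
                   uint64_t frameNumber,
                   const std::shared_ptr<android::FenceTime>& acquireFence) {
    frameTracer.traceNewLayer(layerId, "com.example.app#0");
    frameTracer.traceTimestamp(layerId, bufferId, frameNumber, systemTime(),
                               android::FrameTracer::FrameEvent::HWC_COMPOSITION_QUEUED);
    // The fence trace point is emitted once the fence signals, or on the next trace call
    // for this buffer after signalling.
    frameTracer.traceFence(layerId, bufferId, frameNumber, acquireFence,
                           android::FrameTracer::FrameEvent::ACQUIRE_FENCE);
}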
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 1d0ce10..ce9aab5 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1360,9 +1360,9 @@
void Layer::onDisconnect() {
Mutex::Autolock lock(mFrameEventHistoryMutex);
mFrameEventHistory.onDisconnect();
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
void Layer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
@@ -1370,6 +1370,8 @@
if (newTimestamps) {
mFlinger->mTimeStats->setPostTime(getSequence(), newTimestamps->frameNumber,
getName().c_str(), newTimestamps->postedTime);
+ mFlinger->mTimeStats->setAcquireFence(getSequence(), newTimestamps->frameNumber,
+ newTimestamps->acquireFence);
}
Mutex::Autolock lock(mFrameEventHistoryMutex);
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 1388612..286311b 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -531,7 +531,7 @@
* called after composition.
* returns true if the layer latched a new buffer this frame.
*/
- virtual bool onPostComposition(const std::optional<DisplayId>& /*displayId*/,
+ virtual bool onPostComposition(sp<const DisplayDevice> /*displayDevice*/,
const std::shared_ptr<FenceTime>& /*glDoneFence*/,
const std::shared_ptr<FenceTime>& /*presentFence*/,
const CompositorTiming& /*compositorTiming*/) {
@@ -601,6 +601,8 @@
virtual sp<GraphicBuffer> getBuffer() const { return nullptr; }
+ virtual uint64_t getCurrentFrameNumber() const { return mCurrentFrameNumber; }
+
/*
* Returns if a frame is ready
*/
diff --git a/services/surfaceflinger/Scheduler/Timer.cpp b/services/surfaceflinger/Scheduler/Timer.cpp
new file mode 100644
index 0000000..2394ed2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "SchedulerTimer"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/timerfd.h>
+#include <sys/unistd.h>
+#include <utils/Trace.h>
+#include <chrono>
+#include <cstdint>
+
+#include "Timer.h"
+
+namespace android::scheduler {
+
+static constexpr size_t kReadPipe = 0;
+static constexpr size_t kWritePipe = 1;
+
+template <class T, size_t N>
+constexpr size_t arrayLen(T (&)[N]) {
+ return N;
+}
+
+Timer::Timer()
+ : mTimerFd(timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK)),
+ mEpollFd(epoll_create1(EPOLL_CLOEXEC)) {
+ if (pipe2(mPipes.data(), O_CLOEXEC | O_NONBLOCK)) {
+ ALOGE("could not create TimerDispatch mPipes");
+ }
+
+ mDispatchThread = std::thread(std::bind(&Timer::dispatch, this));
+}
+
+Timer::~Timer() {
+ endDispatch();
+ mDispatchThread.join();
+
+ close(mPipes[kWritePipe]);
+ close(mPipes[kReadPipe]);
+ close(mEpollFd);
+ close(mTimerFd);
+}
+
+void Timer::endDispatch() {
+ static constexpr unsigned char end = 'e';
+ write(mPipes[kWritePipe], &end, sizeof(end));
+}
+
+nsecs_t Timer::now() const {
+ return systemTime(SYSTEM_TIME_MONOTONIC);
+}
+
+constexpr char const* timerTraceTag = "AlarmInNs";
+void Timer::alarmIn(std::function<void()> const& cb, nsecs_t fireIn) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, fireIn);
+
+ using namespace std::literals;
+ static constexpr int ns_per_s =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(1s).count();
+
+ mCallback = cb;
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {.tv_sec = static_cast<long>(fireIn / ns_per_s),
+ .tv_nsec = static_cast<long>(fireIn % ns_per_s)},
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to set timerfd");
+ }
+}
+
+void Timer::alarmCancel() {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, 0);
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ },
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to disarm timerfd");
+ }
+}
+
+void Timer::dispatch() {
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
+ ALOGW("Failed to set SCHED_FIFO on dispatch thread");
+ }
+
+ if (pthread_setname_np(pthread_self(), "TimerDispatch")) {
+ ALOGW("Failed to set thread name on dispatch thread");
+ }
+
+ enum DispatchType : uint32_t { TIMER, TERMINATE, MAX_DISPATCH_TYPE };
+ epoll_event timerEvent;
+ timerEvent.events = EPOLLIN;
+ timerEvent.data.u32 = DispatchType::TIMER;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mTimerFd, &timerEvent) == -1) {
+ ALOGE("Error adding timer fd to epoll dispatch loop");
+ return;
+ }
+
+ epoll_event terminateEvent;
+ terminateEvent.events = EPOLLIN;
+ terminateEvent.data.u32 = DispatchType::TERMINATE;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mPipes[kReadPipe], &terminateEvent) == -1) {
+ ALOGE("Error adding control fd to dispatch loop");
+ return;
+ }
+
+ uint64_t iteration = 0;
+ char const traceNamePrefix[] = "TimerIteration #";
+ static constexpr size_t max64print = std::numeric_limits<decltype(iteration)>::digits10;
+ static constexpr size_t maxlen = arrayLen(traceNamePrefix) + max64print;
+ std::array<char, maxlen> str_buffer;
+ auto timing = true;
+ while (timing) {
+ epoll_event events[DispatchType::MAX_DISPATCH_TYPE];
+ int nfds = epoll_wait(mEpollFd, events, DispatchType::MAX_DISPATCH_TYPE, -1);
+
+ if (ATRACE_ENABLED()) {
+ snprintf(str_buffer.data(), str_buffer.size(), "%s%" PRIu64, traceNamePrefix,
+ iteration++);
+ ATRACE_NAME(str_buffer.data());
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ timing = false;
+ continue;
+ }
+ }
+
+ for (auto i = 0; i < nfds; i++) {
+ if (events[i].data.u32 == DispatchType::TIMER) {
+ static uint64_t mIgnored = 0;
+ read(mTimerFd, &mIgnored, sizeof(mIgnored));
+ std::function<void()> cb;
+ {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ cb = mCallback;
+ }
+ if (cb) {
+ cb();
+ }
+ }
+ if (events[i].data.u32 == DispatchType::TERMINATE) {
+ timing = false;
+ }
+ }
+ }
+}
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/Timer.h b/services/surfaceflinger/Scheduler/Timer.h
new file mode 100644
index 0000000..0ae82c8
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "TimeKeeper.h"
+
+#include <android-base/thread_annotations.h>
+#include <array>
+#include <thread>
+
+namespace android::scheduler {
+
+class Timer : public TimeKeeper {
+public:
+ Timer();
+ ~Timer();
+ nsecs_t now() const final;
+
+ // NB: alarmIn and alarmCancel are thread-safe; the last call to return is the one that takes effect.
+ // Most users will want to serialize these calls so as to be aware of the timer state.
+ void alarmIn(std::function<void()> const& cb, nsecs_t fireIn) final;
+ void alarmCancel() final;
+
+private:
+ int const mTimerFd;
+ int const mEpollFd;
+ std::array<int, 2> mPipes;
+
+ std::thread mDispatchThread;
+ void dispatch();
+ void endDispatch();
+
+ std::mutex mMutex;
+ std::function<void()> mCallback GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
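A rough usage sketch for the new Timer follows. It is illustrative and not part of the patch; the include paths, the kFireIn value, and the atomic flag are assumptions.

#include <atomic>
#include <utils/Timers.h>
#include "Scheduler/Timer.h"

void timerUsageSketch() {
    std::atomic<bool> fired{false};
    android::scheduler::Timer timer;         // spawns the TimerDispatch thread
    constexpr nsecs_t kFireIn = 16'666'667;  // roughly one 60Hz period, illustrative value
    // Arm a one-shot alarm; the callback runs on the dispatch thread when the timerfd fires.
    timer.alarmIn([&fired] { fired = true; }, kFireIn);
    // A later alarmIn() replaces the pending deadline; alarmCancel() disarms it.
    timer.alarmCancel();
}  // ~Timer() terminates and joins the dispatch thread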
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
new file mode 100644
index 0000000..643c5d2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+//#define LOG_NDEBUG 0
+#include "VSyncPredictor.h"
+#include <android-base/logging.h>
+#include <cutils/compiler.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <algorithm>
+#include <chrono>
+#include "SchedulerUtils.h"
+
+namespace android::scheduler {
+static auto constexpr kNeedsSamplesTag = "SamplesRequested";
+static auto constexpr kMaxPercent = 100u;
+
+VSyncPredictor::~VSyncPredictor() = default;
+
+VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
+ size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
+ : kHistorySize(historySize),
+ kMinimumSamplesForPrediction(minimumSamplesForPrediction),
+ kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
+ mIdealPeriod(idealPeriod) {
+ mRateMap[mIdealPeriod] = {idealPeriod, 0};
+}
+
+inline size_t VSyncPredictor::next(int i) const {
+ return (i + 1) % timestamps.size();
+}
+
+bool VSyncPredictor::validate(nsecs_t timestamp) const {
+ if (lastTimestampIndex < 0 || timestamps.empty()) {
+ return true;
+ }
+
+ auto const aValidTimestamp = timestamps[lastTimestampIndex];
+ auto const percent = (timestamp - aValidTimestamp) % mIdealPeriod * kMaxPercent / mIdealPeriod;
+ return percent < kOutlierTolerancePercent || percent > (kMaxPercent - kOutlierTolerancePercent);
+}
+
+void VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ if (!validate(timestamp)) {
+ ALOGW("timestamp was too far off the last known timestamp");
+ return;
+ }
+
+ if (timestamps.size() != kHistorySize) {
+ timestamps.push_back(timestamp);
+ lastTimestampIndex = next(lastTimestampIndex);
+ } else {
+ lastTimestampIndex = next(lastTimestampIndex);
+ timestamps[lastTimestampIndex] = timestamp;
+ }
+
+ if (timestamps.size() < kMinimumSamplesForPrediction) {
+ mRateMap[mIdealPeriod] = {mIdealPeriod, 0};
+ return;
+ }
+
+ // This is a 'simple linear regression' calculation of Y over X, with Y being the
+ // vsync timestamps, and X being the ordinal of vsync count.
+ // The calculated slope is the vsync period.
+ // Formula for reference:
+ // Sigma_i: means sum over all timestamps.
+ // mean(variable): statistical mean of variable.
+ // X: snapped ordinal of the timestamp
+ // Y: vsync timestamp
+ //
+ // Sigma_i( (X_i - mean(X)) * (Y_i - mean(Y) )
+ // slope = -------------------------------------------
+ // Sigma_i ( X_i - mean(X) ) ^ 2
+ //
+ // intercept = mean(Y) - slope * mean(X)
+ //
+ std::vector<nsecs_t> vsyncTS(timestamps.size());
+ std::vector<nsecs_t> ordinals(timestamps.size());
+
+ // normalizing to the oldest timestamp cuts down on error in calculating the intercept.
+ auto const oldest_ts = *std::min_element(timestamps.begin(), timestamps.end());
+ auto it = mRateMap.find(mIdealPeriod);
+ auto const currentPeriod = std::get<0>(it->second);
+ // TODO (b/144707443): it's important that there's some precision in the mean of the ordinals
+ // for the intercept calculation, so scale the ordinals by 10 to continue
+ // fixed point calculation. Explore expanding
+ // scheduler::utils::calculate_mean to have a fixed point fractional part.
+ static constexpr int kScalingFactor = 10;
+
+ for (auto i = 0u; i < timestamps.size(); i++) {
+ vsyncTS[i] = timestamps[i] - oldest_ts;
+ ordinals[i] = ((vsyncTS[i] + (currentPeriod / 2)) / currentPeriod) * kScalingFactor;
+ }
+
+ auto meanTS = scheduler::calculate_mean(vsyncTS);
+ auto meanOrdinal = scheduler::calculate_mean(ordinals);
+ for (auto i = 0u; i < vsyncTS.size(); i++) {
+ vsyncTS[i] -= meanTS;
+ ordinals[i] -= meanOrdinal;
+ }
+
+ auto top = 0ll;
+ auto bottom = 0ll;
+ for (auto i = 0u; i < vsyncTS.size(); i++) {
+ top += vsyncTS[i] * ordinals[i];
+ bottom += ordinals[i] * ordinals[i];
+ }
+
+ if (CC_UNLIKELY(bottom == 0)) {
+ it->second = {mIdealPeriod, 0};
+ return;
+ }
+
+ nsecs_t const anticipatedPeriod = top / bottom * kScalingFactor;
+ nsecs_t const intercept = meanTS - (anticipatedPeriod * meanOrdinal / kScalingFactor);
+
+ it->second = {anticipatedPeriod, intercept};
+
+ ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
+ anticipatedPeriod, intercept);
+}
+
+nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ auto const [slope, intercept] = getVSyncPredictionModel(lk);
+
+ if (timestamps.empty()) {
+ auto const knownTimestamp = mKnownTimestamp ? *mKnownTimestamp : timePoint;
+ auto const numPeriodsOut = ((timePoint - knownTimestamp) / mIdealPeriod) + 1;
+ return knownTimestamp + numPeriodsOut * mIdealPeriod;
+ }
+
+ auto const oldest = *std::min_element(timestamps.begin(), timestamps.end());
+ auto const ordinalRequest = (timePoint - oldest + slope) / slope;
+ auto const prediction = (ordinalRequest * slope) + intercept + oldest;
+
+ ALOGV("prediction made from: %" PRId64 " prediction: %" PRId64 " (+%" PRId64 ") slope: %" PRId64
+ " intercept: %" PRId64,
+ timePoint, prediction, prediction - timePoint, slope, intercept);
+ return prediction;
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel() const {
+ std::lock_guard<std::mutex> lk(mMutex);
+ return VSyncPredictor::getVSyncPredictionModel(lk);
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel(
+ std::lock_guard<std::mutex> const&) const {
+ return mRateMap.find(mIdealPeriod)->second;
+}
+
+void VSyncPredictor::setPeriod(nsecs_t period) {
+ ATRACE_CALL();
+
+ std::lock_guard<std::mutex> lk(mMutex);
+ static constexpr size_t kSizeLimit = 30;
+ if (CC_UNLIKELY(mRateMap.size() == kSizeLimit)) {
+ mRateMap.erase(mRateMap.begin());
+ }
+
+ mIdealPeriod = period;
+ if (mRateMap.find(period) == mRateMap.end()) {
+ mRateMap[mIdealPeriod] = {period, 0};
+ }
+
+ if (!timestamps.empty()) {
+ mKnownTimestamp = *std::max_element(timestamps.begin(), timestamps.end());
+ timestamps.clear();
+ lastTimestampIndex = 0;
+ }
+}
+
+bool VSyncPredictor::needsMoreSamples(nsecs_t now) const {
+ using namespace std::literals::chrono_literals;
+ std::lock_guard<std::mutex> lk(mMutex);
+ bool needsMoreSamples = true;
+ if (timestamps.size() >= kMinimumSamplesForPrediction) {
+ nsecs_t constexpr aLongTime =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(500ms).count();
+ if (!(lastTimestampIndex < 0 || timestamps.empty())) {
+ auto const lastTimestamp = timestamps[lastTimestampIndex];
+ needsMoreSamples = !((lastTimestamp + aLongTime) > now);
+ }
+ }
+
+ ATRACE_INT(kNeedsSamplesTag, needsMoreSamples);
+ return needsMoreSamples;
+}
+
+} // namespace android::scheduler
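The least-squares fit described in the addVsyncTimestamp() comment can be cross-checked against a standalone version of the same formula. The sketch below is illustrative and not part of the patch: it uses the raw sample index and double arithmetic instead of the snapped, fixed-point-scaled ordinals the real code uses.

#include <cstdint>
#include <vector>

struct FitResult {
    double slope;      // estimated vsync period
    double intercept;
};

// Ordinary least squares of timestamp (Y) over sample index (X):
//   slope = Sigma_i((X_i - mean(X)) * (Y_i - mean(Y))) / Sigma_i((X_i - mean(X))^2)
//   intercept = mean(Y) - slope * mean(X)
FitResult fitVsyncModel(const std::vector<int64_t>& timestamps) {
    if (timestamps.size() < 2) {
        return {0.0, 0.0};  // not enough samples for a fit
    }
    const double n = static_cast<double>(timestamps.size());
    double meanX = 0.0, meanY = 0.0;
    for (size_t i = 0; i < timestamps.size(); i++) {
        meanX += static_cast<double>(i);
        meanY += static_cast<double>(timestamps[i]);
    }
    meanX /= n;
    meanY /= n;
    double top = 0.0, bottom = 0.0;
    for (size_t i = 0; i < timestamps.size(); i++) {
        top += (static_cast<double>(i) - meanX) * (static_cast<double>(timestamps[i]) - meanY);
        bottom += (static_cast<double>(i) - meanX) * (static_cast<double>(i) - meanX);
    }
    const double slope = top / bottom;
    return {slope, meanY - slope * meanX};
}
// For evenly spaced samples {0, 16666667, 33333334} this yields a slope of about 16666667
// (the period) and an intercept of about 0, matching what the predictor stores in mRateMap.

The production code additionally rejects outliers via validate(), snaps each timestamp to an ordinal of the current period, and scales the ordinals by 10 to keep precision in integer arithmetic.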
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
new file mode 100644
index 0000000..1590f49
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+#include "VSyncTracker.h"
+
+namespace android::scheduler {
+
+class VSyncPredictor : public VSyncTracker {
+public:
+ /*
+ * \param [in] idealPeriod The initial ideal period to use.
+ * \param [in] historySize The internal amount of entries to store in the model.
+ * \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
+ * predicting.
+ * \param [in] outlierTolerancePercent A percentage from 0 to 100 used to reject samples that
+ * fall more than outlierTolerancePercent away from an anticipated vsync event.
+ */
+ VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
+ uint32_t outlierTolerancePercent);
+ ~VSyncPredictor();
+
+ void addVsyncTimestamp(nsecs_t timestamp) final;
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final;
+
+ /*
+ * Inform the model that the period is anticipated to change to a new value.
+ * The model will use the period parameter to predict vsync events until enough
+ * timestamps with the new period have been collected.
+ *
+ * \param [in] period The new period that should be used.
+ */
+ void setPeriod(nsecs_t period);
+
+ /* Query whether the model needs more samples to make a prediction at timePoint.
+ * \param [in] timePoint The time point to query about.
+ * \return True if the model would benefit from more samples, false if not.
+ */
+ bool needsMoreSamples(nsecs_t timePoint) const;
+
+ std::tuple<nsecs_t /* slope */, nsecs_t /* intercept */> getVSyncPredictionModel() const;
+
+private:
+ VSyncPredictor(VSyncPredictor const&) = delete;
+ VSyncPredictor& operator=(VSyncPredictor const&) = delete;
+
+ size_t const kHistorySize;
+ size_t const kMinimumSamplesForPrediction;
+ size_t const kOutlierTolerancePercent;
+
+ std::mutex mutable mMutex;
+ size_t next(int i) const REQUIRES(mMutex);
+ bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
+ std::tuple<nsecs_t, nsecs_t> getVSyncPredictionModel(std::lock_guard<std::mutex> const&) const
+ REQUIRES(mMutex);
+
+ nsecs_t mIdealPeriod GUARDED_BY(mMutex);
+ std::optional<nsecs_t> mKnownTimestamp GUARDED_BY(mMutex);
+
+ std::unordered_map<nsecs_t, std::tuple<nsecs_t, nsecs_t>> mutable mRateMap GUARDED_BY(mMutex);
+
+ int lastTimestampIndex GUARDED_BY(mMutex) = 0;
+ std::vector<nsecs_t> timestamps GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 2fbef0b..1cc7d61 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1715,7 +1715,12 @@
refreshArgs.layersWithQueuedFrames.reserve(mLayersWithQueuedFrames.size());
for (sp<Layer> layer : mLayersWithQueuedFrames) {
auto compositionLayer = layer->getCompositionLayer();
- if (compositionLayer) refreshArgs.layersWithQueuedFrames.push_back(compositionLayer.get());
+ if (compositionLayer) {
+ refreshArgs.layersWithQueuedFrames.push_back(compositionLayer.get());
+ mFrameTracer->traceTimestamp(layer->getSequence(), layer->getCurrentBufferId(),
+ layer->getCurrentFrameNumber(), systemTime(),
+ FrameTracer::FrameEvent::HWC_COMPOSITION_QUEUED);
+ }
}
refreshArgs.repaintEverything = mRepaintEverything.exchange(false);
@@ -1896,9 +1901,8 @@
}
mDrawingState.traverseInZOrder([&](Layer* layer) {
- bool frameLatched =
- layer->onPostComposition(displayDevice->getId(), glCompositionDoneFenceTime,
- presentFenceTime, compositorTiming);
+ bool frameLatched = layer->onPostComposition(displayDevice, glCompositionDoneFenceTime,
+ presentFenceTime, compositorTiming);
if (frameLatched) {
recordBufferingStats(layer->getName(), layer->getOccupancyHistory(false));
}
@@ -3714,6 +3718,8 @@
if (currentMode == HWC_POWER_MODE_OFF) {
// Turn on the display
+ // TODO: @vhau temp fix only! See b/141111965
+ mTransactionCompletedThread.clearAllPending();
getHwComposer().setPowerMode(*displayId, mode);
if (display->isPrimary() && mode != HWC_POWER_MODE_DOZE_SUSPEND) {
setVsyncEnabledInHWC(*displayId, mHWCVsyncPendingState);
diff --git a/services/surfaceflinger/TimeStats/TimeStats.cpp b/services/surfaceflinger/TimeStats/TimeStats.cpp
index 3e47ec6..611afce 100644
--- a/services/surfaceflinger/TimeStats/TimeStats.cpp
+++ b/services/surfaceflinger/TimeStats/TimeStats.cpp
@@ -113,9 +113,9 @@
mTimeStats.clientCompositionFrames++;
}
-bool TimeStats::recordReadyLocked(int32_t layerID, TimeRecord* timeRecord) {
+bool TimeStats::recordReadyLocked(int32_t layerId, TimeRecord* timeRecord) {
if (!timeRecord->ready) {
- ALOGV("[%d]-[%" PRIu64 "]-presentFence is still not received", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-presentFence is still not received", layerId,
timeRecord->frameTime.frameNumber);
return false;
}
@@ -128,7 +128,7 @@
timeRecord->frameTime.acquireTime = timeRecord->acquireFence->getSignalTime();
timeRecord->acquireFence = nullptr;
} else {
- ALOGV("[%d]-[%" PRIu64 "]-acquireFence signal time is invalid", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-acquireFence signal time is invalid", layerId,
timeRecord->frameTime.frameNumber);
}
}
@@ -141,7 +141,7 @@
timeRecord->frameTime.presentTime = timeRecord->presentFence->getSignalTime();
timeRecord->presentFence = nullptr;
} else {
- ALOGV("[%d]-[%" PRIu64 "]-presentFence signal time invalid", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-presentFence signal time invalid", layerId,
timeRecord->frameTime.frameNumber);
}
}
@@ -155,15 +155,15 @@
return static_cast<int32_t>(delta);
}
-void TimeStats::flushAvailableRecordsToStatsLocked(int32_t layerID) {
+void TimeStats::flushAvailableRecordsToStatsLocked(int32_t layerId) {
ATRACE_CALL();
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
TimeRecord& prevTimeRecord = layerRecord.prevTimeRecord;
std::deque<TimeRecord>& timeRecords = layerRecord.timeRecords;
while (!timeRecords.empty()) {
- if (!recordReadyLocked(layerID, &timeRecords[0])) break;
- ALOGV("[%d]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerID,
+ if (!recordReadyLocked(layerId, &timeRecords[0])) break;
+ ALOGV("[%d]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerId,
timeRecords[0].frameTime.frameNumber, timeRecords[0].frameTime.presentTime);
if (prevTimeRecord.ready) {
@@ -178,37 +178,37 @@
const int32_t postToAcquireMs = msBetween(timeRecords[0].frameTime.postTime,
timeRecords[0].frameTime.acquireTime);
- ALOGV("[%d]-[%" PRIu64 "]-post2acquire[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-post2acquire[%d]", layerId,
timeRecords[0].frameTime.frameNumber, postToAcquireMs);
timeStatsLayer.deltas["post2acquire"].insert(postToAcquireMs);
const int32_t postToPresentMs = msBetween(timeRecords[0].frameTime.postTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-post2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-post2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, postToPresentMs);
timeStatsLayer.deltas["post2present"].insert(postToPresentMs);
const int32_t acquireToPresentMs = msBetween(timeRecords[0].frameTime.acquireTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-acquire2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-acquire2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, acquireToPresentMs);
timeStatsLayer.deltas["acquire2present"].insert(acquireToPresentMs);
const int32_t latchToPresentMs = msBetween(timeRecords[0].frameTime.latchTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-latch2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-latch2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, latchToPresentMs);
timeStatsLayer.deltas["latch2present"].insert(latchToPresentMs);
const int32_t desiredToPresentMs = msBetween(timeRecords[0].frameTime.desiredTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-desired2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-desired2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, desiredToPresentMs);
timeStatsLayer.deltas["desired2present"].insert(desiredToPresentMs);
const int32_t presentToPresentMs = msBetween(prevTimeRecord.frameTime.presentTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-present2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-present2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, presentToPresentMs);
timeStatsLayer.deltas["present2present"].insert(presentToPresentMs);
}
@@ -227,28 +227,28 @@
layerName.compare(0, kMinLenLayerName, kPopupWindowPrefix) != 0;
}
-void TimeStats::setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+void TimeStats::setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerID, frameNumber, layerName.c_str(),
+ ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerId, frameNumber, layerName.c_str(),
postTime);
std::lock_guard<std::mutex> lock(mMutex);
if (!mTimeStats.stats.count(layerName) && mTimeStats.stats.size() >= MAX_NUM_LAYER_STATS) {
return;
}
- if (!mTimeStatsTracker.count(layerID) && mTimeStatsTracker.size() < MAX_NUM_LAYER_RECORDS &&
+ if (!mTimeStatsTracker.count(layerId) && mTimeStatsTracker.size() < MAX_NUM_LAYER_RECORDS &&
layerNameIsValid(layerName)) {
- mTimeStatsTracker[layerID].layerName = layerName;
+ mTimeStatsTracker[layerId].layerName = layerName;
}
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) {
ALOGE("[%d]-[%s]-timeRecords is at its maximum size[%zu]. Ignore this when unittesting.",
- layerID, layerRecord.layerName.c_str(), MAX_NUM_TIME_RECORDS);
- mTimeStatsTracker.erase(layerID);
+ layerId, layerRecord.layerName.c_str(), MAX_NUM_TIME_RECORDS);
+ mTimeStatsTracker.erase(layerId);
return;
}
// For most media content, the acquireFence is invalid because the buffer is
@@ -270,15 +270,15 @@
layerRecord.waitData = layerRecord.timeRecords.size() - 1;
}
-void TimeStats::setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) {
+void TimeStats::setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerID, frameNumber, latchTime);
+ ALOGV("[%d]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerId, frameNumber, latchTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -288,15 +288,15 @@
}
}
-void TimeStats::setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) {
+void TimeStats::setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerID, frameNumber, desiredTime);
+ ALOGV("[%d]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerId, frameNumber, desiredTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -306,15 +306,15 @@
}
}
-void TimeStats::setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) {
+void TimeStats::setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerID, frameNumber, acquireTime);
+ ALOGV("[%d]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerId, frameNumber, acquireTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -324,17 +324,17 @@
}
}
-void TimeStats::setAcquireFence(int32_t layerID, uint64_t frameNumber,
+void TimeStats::setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerID, frameNumber,
+ ALOGV("[%d]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerId, frameNumber,
acquireFence->getSignalTime());
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -344,15 +344,15 @@
}
}
-void TimeStats::setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) {
+void TimeStats::setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerID, frameNumber, presentTime);
+ ALOGV("[%d]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerId, frameNumber, presentTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -363,20 +363,20 @@
layerRecord.waitData++;
}
- flushAvailableRecordsToStatsLocked(layerID);
+ flushAvailableRecordsToStatsLocked(layerId);
}
-void TimeStats::setPresentFence(int32_t layerID, uint64_t frameNumber,
+void TimeStats::setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerID, frameNumber,
+ ALOGV("[%d]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerId, frameNumber,
presentFence->getSignalTime());
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -387,25 +387,25 @@
layerRecord.waitData++;
}
- flushAvailableRecordsToStatsLocked(layerID);
+ flushAvailableRecordsToStatsLocked(layerId);
}
-void TimeStats::onDestroy(int32_t layerID) {
+void TimeStats::onDestroy(int32_t layerId) {
ATRACE_CALL();
- ALOGV("[%d]-onDestroy", layerID);
+ ALOGV("[%d]-onDestroy", layerId);
std::lock_guard<std::mutex> lock(mMutex);
- mTimeStatsTracker.erase(layerID);
+ mTimeStatsTracker.erase(layerId);
}
-void TimeStats::removeTimeRecord(int32_t layerID, uint64_t frameNumber) {
+void TimeStats::removeTimeRecord(int32_t layerId, uint64_t frameNumber) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-removeTimeRecord", layerID, frameNumber);
+ ALOGV("[%d]-[%" PRIu64 "]-removeTimeRecord", layerId, frameNumber);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
size_t removeAt = 0;
for (const TimeRecord& record : layerRecord.timeRecords) {
if (record.frameTime.frameNumber == frameNumber) break;
diff --git a/services/surfaceflinger/TimeStats/TimeStats.h b/services/surfaceflinger/TimeStats/TimeStats.h
index 1313132..6e71f5a 100644
--- a/services/surfaceflinger/TimeStats/TimeStats.h
+++ b/services/surfaceflinger/TimeStats/TimeStats.h
@@ -44,20 +44,20 @@
virtual void incrementMissedFrames() = 0;
virtual void incrementClientCompositionFrames() = 0;
- virtual void setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+ virtual void setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) = 0;
- virtual void setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) = 0;
- virtual void setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) = 0;
- virtual void setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) = 0;
- virtual void setAcquireFence(int32_t layerID, uint64_t frameNumber,
+ virtual void setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) = 0;
+ virtual void setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) = 0;
+ virtual void setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) = 0;
+ virtual void setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) = 0;
- virtual void setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) = 0;
- virtual void setPresentFence(int32_t layerID, uint64_t frameNumber,
+ virtual void setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) = 0;
+ virtual void setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) = 0;
// Clean up the layer record
- virtual void onDestroy(int32_t layerID) = 0;
+ virtual void onDestroy(int32_t layerId) = 0;
// If SF skips or rejects a buffer, remove the corresponding TimeRecord.
- virtual void removeTimeRecord(int32_t layerID, uint64_t frameNumber) = 0;
+ virtual void removeTimeRecord(int32_t layerId, uint64_t frameNumber) = 0;
virtual void setPowerMode(int32_t powerMode) = 0;
// Source of truth is RefreshRateStats.
@@ -116,20 +116,20 @@
void incrementMissedFrames() override;
void incrementClientCompositionFrames() override;
- void setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+ void setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) override;
- void setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) override;
- void setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) override;
- void setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) override;
- void setAcquireFence(int32_t layerID, uint64_t frameNumber,
+ void setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) override;
+ void setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) override;
+ void setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) override;
+ void setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) override;
- void setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) override;
- void setPresentFence(int32_t layerID, uint64_t frameNumber,
+ void setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) override;
+ void setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) override;
// Clean up the layer record
- void onDestroy(int32_t layerID) override;
+ void onDestroy(int32_t layerId) override;
// If SF skips or rejects a buffer, remove the corresponding TimeRecord.
- void removeTimeRecord(int32_t layerID, uint64_t frameNumber) override;
+ void removeTimeRecord(int32_t layerId, uint64_t frameNumber) override;
void setPowerMode(int32_t powerMode) override;
// Source of truth is RefreshRateStats.
@@ -139,8 +139,8 @@
static const size_t MAX_NUM_TIME_RECORDS = 64;
private:
- bool recordReadyLocked(int32_t layerID, TimeRecord* timeRecord);
- void flushAvailableRecordsToStatsLocked(int32_t layerID);
+ bool recordReadyLocked(int32_t layerId, TimeRecord* timeRecord);
+ void flushAvailableRecordsToStatsLocked(int32_t layerId);
void flushPowerTimeLocked();
void flushAvailableGlobalRecordsToStatsLocked();
@@ -152,7 +152,7 @@
std::atomic<bool> mEnabled = false;
std::mutex mMutex;
TimeStatsHelper::TimeStatsGlobal mTimeStats;
- // Hashmap for LayerRecord with layerID as the hash key
+ // Hashmap for LayerRecord with layerId as the hash key
std::unordered_map<int32_t, LayerRecord> mTimeStatsTracker;
PowerTime mPowerTime;
GlobalRecord mGlobalRecord;
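For reference, a per-frame call-order sketch for the renamed TimeStats interface. It is illustrative and not part of the patch: the wrapper function, the literal layer name, the include paths, and the exact set of call sites are assumptions based on the signatures above and the Layer.cpp change earlier in this patch.

#include <memory>
#include <utils/Timers.h>
#include "TimeStats/TimeStats.h"

void recordFrameSketch(android::TimeStats& timeStats, int32_t layerId, uint64_t frameNumber,
                       const std::shared_ptr<android::FenceTime>& acquireFence,
                       const std::shared_ptr<android::FenceTime>& presentFence) {
    timeStats.setPostTime(layerId, frameNumber, "com.example.app#0", systemTime());
    timeStats.setAcquireFence(layerId, frameNumber, acquireFence);
    timeStats.setLatchTime(layerId, frameNumber, systemTime());
    // The present fence completes the record; once it signals, deltas such as
    // post2present, acquire2present and latch2present are flushed into the stats.
    timeStats.setPresentFence(layerId, frameNumber, presentFence);
}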
diff --git a/services/surfaceflinger/TransactionCompletedThread.cpp b/services/surfaceflinger/TransactionCompletedThread.cpp
index 8db03db..c15355d 100644
--- a/services/surfaceflinger/TransactionCompletedThread.cpp
+++ b/services/surfaceflinger/TransactionCompletedThread.cpp
@@ -189,6 +189,15 @@
return NO_ERROR;
}
+void TransactionCompletedThread::clearAllPending() {
+ std::lock_guard lock(mMutex);
+ if (!mRunning) {
+ return;
+ }
+ mPendingTransactions.clear();
+ mConditionVariable.notify_all();
+}
+
status_t TransactionCompletedThread::registerUnpresentedCallbackHandle(
const sp<CallbackHandle>& handle) {
std::lock_guard lock(mMutex);
diff --git a/services/surfaceflinger/TransactionCompletedThread.h b/services/surfaceflinger/TransactionCompletedThread.h
index 12ea8fe..cd95bfb 100644
--- a/services/surfaceflinger/TransactionCompletedThread.h
+++ b/services/surfaceflinger/TransactionCompletedThread.h
@@ -70,6 +70,8 @@
// Notifies the TransactionCompletedThread that a pending CallbackHandle has been presented.
status_t finalizePendingCallbackHandles(const std::deque<sp<CallbackHandle>>& handles);
+ void clearAllPending();
+
// Adds the Transaction CallbackHandle from a layer that does not need to be relatched and
// presented this frame.
status_t registerUnpresentedCallbackHandle(const sp<CallbackHandle>& handle);
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index 78114a1..0c4a752 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -55,6 +55,8 @@
"TransactionApplicationTest.cpp",
"StrongTypingTest.cpp",
"VSyncDispatchTimerQueueTest.cpp",
+ "VSyncDispatchRealtimeTest.cpp",
+ "VSyncPredictorTest.cpp",
"mock/DisplayHardware/MockComposer.cpp",
"mock/DisplayHardware/MockDisplay.cpp",
"mock/DisplayHardware/MockPowerAdvisor.cpp",
diff --git a/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
index b5af591..c334bcf 100644
--- a/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
@@ -82,8 +82,8 @@
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
- mFrameTracer->traceNewLayer(layerID, layerName);
+ const int32_t layerId = 5;
+ mFrameTracer->traceNewLayer(layerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
@@ -92,7 +92,7 @@
tracingSession->StartBlocking();
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
tracingSession->StopBlocking();
@@ -103,31 +103,31 @@
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
- const int32_t secondLayerID = 6;
+ const int32_t layerId = 5;
+ const int32_t secondLayerId = 6;
auto tracingSession = getTracingSessionForTest();
tracingSession->StartBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceNewLayer(secondLayerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceNewLayer(secondLayerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 2\n");
tracingSession->StopBlocking();
- mFrameTracer->onDestroy(layerID);
+ mFrameTracer->onDestroy(layerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
- mFrameTracer->onDestroy(layerID);
+ mFrameTracer->onDestroy(layerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
- mFrameTracer->onDestroy(secondLayerID);
+ mFrameTracer->onDestroy(secondLayerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
}
TEST_F(FrameTracerTest, canTraceAfterAddingLayer) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 1;
+ const int32_t layerId = 1;
const uint32_t bufferID = 2;
const uint64_t frameNumber = 3;
const nsecs_t timestamp = 4;
@@ -141,9 +141,9 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceTimestamp(layerID, bufferID, frameNumber, timestamp, type, duration);
+ mFrameTracer->traceTimestamp(layerId, bufferID, frameNumber, timestamp, type, duration);
// Create second trace packet to finalize the previous one.
- mFrameTracer->traceTimestamp(layerID, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -157,10 +157,10 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceTimestamp(layerID, bufferID, frameNumber, timestamp, type, duration);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceTimestamp(layerId, bufferID, frameNumber, timestamp, type, duration);
// Create second trace packet to finalize the previous one.
- mFrameTracer->traceTimestamp(layerID, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -191,7 +191,7 @@
TEST_F(FrameTracerTest, traceFenceTriggersOnNextTraceAfterFenceFired) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -204,10 +204,10 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
// Trace.
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fenceTime, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fenceTime, type);
// Create extra trace packet to (hopefully not) trigger and finalize the fence packet.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
EXPECT_EQ(raw_trace.size(), 0);
@@ -219,12 +219,12 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fenceTime, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fenceTime, type);
const nsecs_t timestamp = systemTime();
fenceFactory.signalAllForTest(Fence::NO_FENCE, timestamp);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -254,7 +254,7 @@
TEST_F(FrameTracerTest, traceFenceWithStartTimeAfterSignalTime_ShouldHaveNoDuration) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -264,24 +264,24 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
// traceFence called after fence signalled.
const nsecs_t signalTime1 = systemTime();
const nsecs_t startTime1 = signalTime1 + 100000;
auto fence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime1);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence1, type, startTime1);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence1, type, startTime1);
// traceFence called before fence signalled.
const nsecs_t signalTime2 = systemTime();
const nsecs_t startTime2 = signalTime2 + 100000;
auto fence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence2, type, startTime2);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence2, type, startTime2);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime2);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -309,7 +309,7 @@
TEST_F(FrameTracerTest, traceFenceOlderThanDeadline_ShouldBeIgnored) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -321,11 +321,11 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence, type);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime);
// Create extra trace packet to trigger and finalize any previous fence packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -334,7 +334,7 @@
TEST_F(FrameTracerTest, traceFenceWithValidStartTime_ShouldHaveCorrectDuration) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -345,24 +345,24 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
// traceFence called after fence signalled.
const nsecs_t signalTime1 = systemTime();
const nsecs_t startTime1 = signalTime1 - duration;
auto fence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime1);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence1, type, startTime1);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence1, type, startTime1);
// traceFence called before fence signalled.
const nsecs_t signalTime2 = systemTime();
const nsecs_t startTime2 = signalTime2 - duration;
auto fence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence2, type, startTime2);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence2, type, startTime2);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime2);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
diff --git a/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp b/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
index 4eb9ec3..7b60fa2 100644
--- a/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
@@ -170,8 +170,8 @@
return result;
}
-static std::string genLayerName(int32_t layerID) {
- return (layerID < 0 ? "PopupWindow:b54fcd1#0" : "com.dummy#") + std::to_string(layerID);
+static std::string genLayerName(int32_t layerId) {
+ return (layerId < 0 ? "PopupWindow:b54fcd1#0" : "com.dummy#") + std::to_string(layerId);
}
void TimeStatsTest::setTimeStamp(TimeStamp type, int32_t id, uint64_t frameNumber, nsecs_t ts) {
@@ -560,22 +560,22 @@
EXPECT_TRUE(inputCommand(InputCommand::ENABLE, FMT_STRING).empty());
for (size_t i = 0; i < 10000000; ++i) {
- const int32_t layerID = genRandomInt32(-1, 10);
+ const int32_t layerId = genRandomInt32(-1, 10);
const int32_t frameNumber = genRandomInt32(1, 10);
switch (genRandomInt32(0, 100)) {
case 0:
ALOGV("removeTimeRecord");
- ASSERT_NO_FATAL_FAILURE(mTimeStats->removeTimeRecord(layerID, frameNumber));
+ ASSERT_NO_FATAL_FAILURE(mTimeStats->removeTimeRecord(layerId, frameNumber));
continue;
case 1:
ALOGV("onDestroy");
- ASSERT_NO_FATAL_FAILURE(mTimeStats->onDestroy(layerID));
+ ASSERT_NO_FATAL_FAILURE(mTimeStats->onDestroy(layerId));
continue;
}
TimeStamp type = static_cast<TimeStamp>(genRandomInt32(TIME_STAMP_BEGIN, TIME_STAMP_END));
const int32_t ts = genRandomInt32(1, 1000000000);
- ALOGV("type[%d], layerID[%d], frameNumber[%d], ts[%d]", type, layerID, frameNumber, ts);
- setTimeStamp(type, layerID, frameNumber, ts);
+ ALOGV("type[%d], layerId[%d], frameNumber[%d], ts[%d]", type, layerId, frameNumber, ts);
+ setTimeStamp(type, layerId, frameNumber, ts);
}
}
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
new file mode 100644
index 0000000..c012616
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scheduler/TimeKeeper.h"
+#include "Scheduler/Timer.h"
+#include "Scheduler/VSyncDispatchTimerQueue.h"
+#include "Scheduler/VSyncTracker.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <thread>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+template <typename Rep, typename Per>
+constexpr nsecs_t toNs(std::chrono::duration<Rep, Per> const& tp) {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(tp).count();
+}
+
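+// Stub tracker that reports vsyncs on an ideal, fixed 3ms (~333hz) grid.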
+class FixedRateIdealStubTracker : public VSyncTracker {
+public:
+ FixedRateIdealStubTracker() : mPeriod{toNs(3ms)} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final {
+ auto const floor = timePoint % mPeriod;
+ if (floor == 0) {
+ return timePoint;
+ }
+ return timePoint - floor + mPeriod;
+ }
+
+private:
+ nsecs_t const mPeriod;
+};
+
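+// Stub tracker whose period and phase can be changed mid-test via set_interval(),
+// emulating a variable-refresh-rate display.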
+class VRRStubTracker : public VSyncTracker {
+public:
+ VRRStubTracker(nsecs_t period) : mPeriod{period} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t time_point) const final {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ auto const normalized_to_base = time_point - mBase;
+ auto const floor = (normalized_to_base) % mPeriod;
+ if (floor == 0) {
+ return time_point;
+ }
+ return normalized_to_base - floor + mPeriod + mBase;
+ }
+
+ void set_interval(nsecs_t interval, nsecs_t last_known) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mPeriod = interval;
+ mBase = last_known;
+ }
+
+private:
+ std::mutex mutable mMutex;
+ nsecs_t mPeriod;
+ nsecs_t mBase = 0;
+};
+
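+// Shared constants for the realtime dispatch tests below.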
+struct VSyncDispatchRealtimeTest : testing::Test {
+ static nsecs_t constexpr mDispatchGroupThreshold = toNs(100us);
+ static size_t constexpr mIterations = 20;
+};
+
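+// Helper that reschedules its callback against the dispatch once per iteration and
+// records every callback time for later inspection.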
+class RepeatingCallbackReceiver {
+public:
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t wl)
+ : mWorkload(wl),
+ mCallback(
+ dispatch, [&](auto time) { callback_called(time); }, "repeat0") {}
+
+ void repeatedly_schedule(size_t iterations, std::function<void(nsecs_t)> const& onEachFrame) {
+ mCallbackTimes.reserve(iterations);
+ mCallback.schedule(mWorkload, systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload);
+
+ for (auto i = 0u; i < iterations - 1; i++) {
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ mCalled = false;
+ auto last = mLastTarget;
+ lk.unlock();
+
+ onEachFrame(last);
+
+ mCallback.schedule(mWorkload, last + mWorkload);
+ }
+
+ // wait for the last callback.
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ }
+
+ void with_callback_times(std::function<void(std::vector<nsecs_t> const&)> const& fn) const {
+ fn(mCallbackTimes);
+ }
+
+private:
+ void callback_called(nsecs_t time) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mCallbackTimes.push_back(time);
+ mCalled = true;
+ mLastTarget = time;
+ mCv.notify_all();
+ }
+
+ nsecs_t const mWorkload;
+ VSyncCallbackRegistration mCallback;
+
+ std::mutex mMutex;
+ std::condition_variable mCv;
+ bool mCalled = false;
+ nsecs_t mLastTarget = 0;
+ std::vector<nsecs_t> mCallbackTimes;
+};
+
+TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
+ FixedRateIdealStubTracker tracker;
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ static size_t constexpr num_clients = 3;
+ std::array<RepeatingCallbackReceiver, num_clients>
+ cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us)),
+ RepeatingCallbackReceiver(dispatch, toNs(0h)),
+ RepeatingCallbackReceiver(dispatch, toNs(1ms))};
+
+ auto const on_each_frame = [](nsecs_t) {};
+ std::array<std::thread, num_clients> threads{
+ std::thread([&] { cb_receiver[0].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[1].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[2].repeatedly_schedule(mIterations, on_each_frame); }),
+ };
+
+ for (auto it = threads.rbegin(); it != threads.rend(); it++) {
+ it->join();
+ }
+
+ for (auto const& cbs : cb_receiver) {
+ cbs.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+ }
+}
+
+// starts at 333hz, slides down to 43hz
+TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
+ auto next_vsync_interval = toNs(3ms);
+ VRRStubTracker tracker(next_vsync_interval);
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
+ };
+
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+
+// starts at 333hz, jumps to 200hz at frame 10
+TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
+ VRRStubTracker tracker(toNs(3ms));
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto jump_frame_counter = 0u;
+ auto constexpr jump_frame_at = 10u;
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ if (jump_frame_counter++ == jump_frame_at) {
+ tracker.set_interval(toNs(5ms), last_known);
+ }
+ };
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
new file mode 100644
index 0000000..d0c8090
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LibSurfaceFlingerUnittests"
+#define LOG_NDEBUG 0
+
+#include "Scheduler/VSyncPredictor.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <chrono>
+#include <utility>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
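+// Matches when arg is within +/- tolerance of value.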
+MATCHER_P2(IsCloseTo, value, tolerance, "is within tolerance") {
+ return arg <= value + tolerance && arg >= value - tolerance;
+}
+
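+// Generates count ideal vsync timestamps spaced period ns apart, starting at bias.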
+std::vector<nsecs_t> generateVsyncTimestamps(size_t count, nsecs_t period, nsecs_t bias) {
+ std::vector<nsecs_t> vsyncs(count);
+ std::generate(vsyncs.begin(), vsyncs.end(),
+ [&, n = 0]() mutable { return n++ * period + bias; });
+ return vsyncs;
+}
+
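+// Fixture providing a predictor with a 1000ns nominal period, a 10-sample history,
+// a 6-sample minimum for prediction, and 25% outlier tolerance.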
+struct VSyncPredictorTest : testing::Test {
+ nsecs_t mNow = 0;
+ nsecs_t mPeriod = 1000;
+ static constexpr size_t kHistorySize = 10;
+ static constexpr size_t kMinimumSamplesForPrediction = 6;
+ static constexpr size_t kOutlierTolerancePercent = 25;
+ static constexpr nsecs_t mMaxRoundingError = 100;
+
+ VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+ kOutlierTolerancePercent};
+};
+
+TEST_F(VSyncPredictorTest, reportsAnticipatedPeriod) {
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+
+ EXPECT_THAT(slope, Eq(mPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ auto const changedPeriod = 2000;
+ tracker.setPeriod(changedPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(changedPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededWhenHasNoDataPoints) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += mPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededAfterExplicitRateChange) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += mPeriod);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ auto const changedPeriod = mPeriod * 2;
+ tracker.setPeriod(changedPeriod);
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow));
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += changedPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, transitionsToModelledPointsAfterSynthetic) {
+ auto last = mNow;
+ auto const bias = 10;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(last + mPeriod));
+ mNow += mPeriod - bias;
+ last = mNow;
+ tracker.addVsyncTimestamp(mNow);
+ mNow += bias;
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 100), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 990), Eq(mNow + 2 * mPeriod - bias));
+}
+
+TEST_F(VSyncPredictorTest, uponNotifiedOfInaccuracyUsesSynthetic) {
+ auto const slightlyLessPeriod = mPeriod - 10;
+ auto const changedPeriod = mPeriod - 1;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += slightlyLessPeriod);
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + slightlyLessPeriod));
+ tracker.setPeriod(changedPeriod);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + changedPeriod));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_60hzHighVariance) {
+ // these are precomputed simulated 16.6ms vsyncs with uniform distribution +/- 1.6ms error
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 15492949, 32325658, 49534984, 67496129, 84652891,
+ 100332564, 117737004, 132125931, 149291099, 165199602,
+ };
+ auto constexpr idealPeriod = 16600000;
+ auto constexpr expectedPeriod = 16639242;
+ auto constexpr expectedIntercept = 1049341;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_90hzLowVariance) {
+ // these are precomputed simulated 11.1ms vsyncs with uniform distribution +/- 1ms error
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 11167047, 22603464, 32538479, 44938134, 56321268,
+ 66730346, 78062637, 88171429, 99707843, 111397621,
+ };
+ auto idealPeriod = 11110000;
+ auto expectedPeriod = 11089413;
+ auto expectedIntercept = 94421;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelinesDiscontinuous_22hzLowVariance) {
+ // these are 45.45ms (22hz) vsyncs with low variance, randomly computed, between -1 and 1ms
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 45259463, // 0
+ 91511026, // 1
+ 136307650, // 2
+ 1864501714, // 40
+ 1908641034, // 41
+ 1955278544, // 42
+ 4590180096, // 100
+ 4681594994, // 102
+ 5499224734, // 120
+ 5591378272, // 122
+ };
+ auto idealPeriod = 45454545;
+ auto expectedPeriod = 45450152;
+ auto expectedIntercept = 469647;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, againstOutliersDiscontinuous_500hzLowVariance) {
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 1992548, // 0
+ 4078038, // 1
+ 6165794, // 2
+ 7958171, // 3
+ 10193537, // 4
+ 2401840200, // 1200
+ 2403000000, // an outlier that should be excluded (1201 and a half)
+ 2405803629, // 1202
+ 2408028599, // 1203
+ 2410121051, // 1204
+ };
+ auto idealPeriod = 2000000;
+ auto expectedPeriod = 1999892;
+ auto expectedIntercept = 175409;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, handlesVsyncChange) {
+ auto const fastPeriod = 100;
+ auto const fastTimeBase = 100;
+ auto const slowPeriod = 400;
+ auto const slowTimeBase = 800;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+
+ tracker.setPeriod(fastPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto const mMaxRoundingError = 100;
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(fastPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(slowPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, willBeAccurateUsingPriorResultsForRate) {
+ auto const fastPeriod = 101000;
+ auto const fastTimeBase = fastPeriod - 500;
+ auto const fastPeriod2 = 99000;
+
+ auto const slowPeriod = 400000;
+ auto const slowTimeBase = 800000 - 201;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+ auto const simulatedVsyncsFast2 =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod2, fastTimeBase);
+
+ auto idealPeriod = 100000;
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ // we had a model for the 100us idealPeriod before, use that until the new samples are
+ // sufficiently built up
+ tracker.setPeriod(idealPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ for (auto const& timestamp : simulatedVsyncsFast2) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod2));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, willBecomeInaccurateAfterA_longTimeWithNoSamples) {
+ auto const simulatedVsyncs = generateVsyncTimestamps(kMinimumSamplesForPrediction, mPeriod, 0);
+
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto const mNow = *simulatedVsyncs.rbegin();
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ // TODO: would be better to decay this as a result of the variance of the samples
+ static auto constexpr aLongTimeOut = 1000000000;
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow + aLongTimeOut));
+}
+
+TEST_F(VSyncPredictorTest, idealModelPredictionsBeforeRegressionModelIsBuilt) {
+ auto const simulatedVsyncs =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction + 1, mPeriod, 0);
+ nsecs_t const mNow = 0;
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mPeriod));
+
+ nsecs_t const aBitOfTime = 422;
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+
+ for (auto i = kMinimumSamplesForPrediction; i < simulatedVsyncs.size(); i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+}
+
+} // namespace android::scheduler