Merge "gralloc: libgralloctypes encode hidl_vec is invalid"
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index 61e22a4..8400cdc 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -933,6 +933,31 @@
unlink(path.c_str());
}
+static void DumpVisibleWindowViews() {
+ if (!ds.IsZipping()) {
+ MYLOGD("Not dumping visible views because it's not a zipped bugreport\n");
+ return;
+ }
+ DurationReporter duration_reporter("VISIBLE WINDOW VIEWS");
+ const std::string path = ds.bugreport_internal_dir_ + "/tmp_visible_window_views";
+ auto fd = android::base::unique_fd(TEMP_FAILURE_RETRY(open(path.c_str(),
+ O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC | O_NOFOLLOW,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)));
+ if (fd < 0) {
+ MYLOGE("Could not open %s to dump visible views.\n", path.c_str());
+ return;
+ }
+ RunCommandToFd(fd, "", {"cmd", "window", "dump-visible-window-views"},
+ CommandOptions::WithTimeout(120).Build());
+ bool empty = 0 == lseek(fd, 0, SEEK_END);
+ if (!empty) {
+ ds.AddZipEntry("visible_windows.zip", path);
+ } else {
+ MYLOGW("Failed to dump visible windows\n");
+ }
+ unlink(path.c_str());
+}
+
static void DumpIpTablesAsRoot() {
RunCommand("IPTABLES", {"iptables", "-L", "-nvx"});
RunCommand("IP6TABLES", {"ip6tables", "-L", "-nvx"});
@@ -1317,6 +1342,8 @@
RUN_SLOW_FUNCTION_WITH_CONSENT_CHECK(RunCommand, "PROCRANK", {"procrank"}, AS_ROOT_20);
+ RUN_SLOW_FUNCTION_WITH_CONSENT_CHECK(DumpVisibleWindowViews);
+
DumpFile("VIRTUAL MEMORY STATS", "/proc/vmstat");
DumpFile("VMALLOC INFO", "/proc/vmallocinfo");
DumpFile("SLAB INFO", "/proc/slabinfo");
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 4026f29..e6e232c 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -92,10 +92,6 @@
static constexpr const char* CACHE_DIR_POSTFIX = "/cache";
static constexpr const char* CODE_CACHE_DIR_POSTFIX = "/code_cache";
-static constexpr const char *kIdMapPath = "/system/bin/idmap";
-static constexpr const char* IDMAP_PREFIX = "/data/resource-cache/";
-static constexpr const char* IDMAP_SUFFIX = "@idmap";
-
// fsverity assumes the page size is always 4096. If not, the feature can not be
// enabled.
static constexpr int kVerityPageSize = 4096;
@@ -2253,206 +2249,6 @@
return res;
}
-static void run_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--fd", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static void run_verify_idmap(const char *target_apk, const char *overlay_apk, int idmap_fd)
-{
- execl(kIdMapPath, kIdMapPath, "--verify", target_apk, overlay_apk,
- StringPrintf("%d", idmap_fd).c_str(), (char*)nullptr);
- PLOG(ERROR) << "execl (" << kIdMapPath << ") failed";
-}
-
-static bool delete_stale_idmap(const char* target_apk, const char* overlay_apk,
- const char* idmap_path, int32_t uid) {
- int idmap_fd = open(idmap_path, O_RDWR);
- if (idmap_fd < 0) {
- PLOG(ERROR) << "idmap open failed: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- LOG(ERROR) << "setgid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (setuid(uid) != 0) {
- LOG(ERROR) << "setuid(" << uid << ") failed during idmap";
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- PLOG(ERROR) << "flock(" << idmap_path << ") failed during idmap";
- exit(1);
- }
-
- run_verify_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to deleting stale idmap failed */
- } else {
- int status = wait_child(pid);
- close(idmap_fd);
-
- if (status != 0) {
- // Failed on verifying if idmap is made from target_apk and overlay_apk.
- LOG(DEBUG) << "delete stale idmap: " << idmap_path;
- unlink(idmap_path);
- return true;
- }
- }
- return false;
-}
-
-// Transform string /a/b/c.apk to (prefix)/a@b@c.apk@(suffix)
-// eg /a/b/c.apk to /data/resource-cache/a@b@c.apk@idmap
-static int flatten_path(const char *prefix, const char *suffix,
- const char *overlay_path, char *idmap_path, size_t N)
-{
- if (overlay_path == nullptr || idmap_path == nullptr) {
- return -1;
- }
- const size_t len_overlay_path = strlen(overlay_path);
- // will access overlay_path + 1 further below; requires absolute path
- if (len_overlay_path < 2 || *overlay_path != '/') {
- return -1;
- }
- const size_t len_idmap_root = strlen(prefix);
- const size_t len_suffix = strlen(suffix);
- if (SIZE_MAX - len_idmap_root < len_overlay_path ||
- SIZE_MAX - (len_idmap_root + len_overlay_path) < len_suffix) {
- // additions below would cause overflow
- return -1;
- }
- if (N < len_idmap_root + len_overlay_path + len_suffix) {
- return -1;
- }
- memset(idmap_path, 0, N);
- snprintf(idmap_path, N, "%s%s%s", prefix, overlay_path + 1, suffix);
- char *ch = idmap_path + len_idmap_root;
- while (*ch != '\0') {
- if (*ch == '/') {
- *ch = '@';
- }
- ++ch;
- }
- return 0;
-}
-
-binder::Status InstalldNativeService::idmap(const std::string& targetApkPath,
- const std::string& overlayApkPath, int32_t uid) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(targetApkPath);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* target_apk = targetApkPath.c_str();
- const char* overlay_apk = overlayApkPath.c_str();
- ALOGV("idmap target_apk=%s overlay_apk=%s uid=%d\n", target_apk, overlay_apk, uid);
-
- int idmap_fd = -1;
- char idmap_path[PATH_MAX];
- struct stat idmap_stat;
- bool outdated = false;
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- goto fail;
- }
-
- if (stat(idmap_path, &idmap_stat) < 0) {
- outdated = true;
- } else {
- outdated = delete_stale_idmap(target_apk, overlay_apk, idmap_path, uid);
- }
-
- if (outdated) {
- idmap_fd = open(idmap_path, O_RDWR | O_CREAT | O_EXCL, 0644);
- } else {
- idmap_fd = open(idmap_path, O_RDWR);
- }
-
- if (idmap_fd < 0) {
- ALOGE("idmap cannot open '%s' for output: %s\n", idmap_path, strerror(errno));
- goto fail;
- }
- if (fchown(idmap_fd, AID_SYSTEM, uid) < 0) {
- ALOGE("idmap cannot chown '%s'\n", idmap_path);
- goto fail;
- }
- if (fchmod(idmap_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) < 0) {
- ALOGE("idmap cannot chmod '%s'\n", idmap_path);
- goto fail;
- }
-
- if (!outdated) {
- close(idmap_fd);
- return ok();
- }
-
- pid_t pid;
- pid = fork();
- if (pid == 0) {
- /* child -- drop privileges before continuing */
- if (setgid(uid) != 0) {
- ALOGE("setgid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (setuid(uid) != 0) {
- ALOGE("setuid(%d) failed during idmap\n", uid);
- exit(1);
- }
- if (flock(idmap_fd, LOCK_EX | LOCK_NB) != 0) {
- ALOGE("flock(%s) failed during idmap: %s\n", idmap_path, strerror(errno));
- exit(1);
- }
-
- run_idmap(target_apk, overlay_apk, idmap_fd);
- exit(1); /* only if exec call to idmap failed */
- } else {
- int status = wait_child(pid);
- if (status != 0) {
- ALOGE("idmap failed, status=0x%04x\n", status);
- goto fail;
- }
- }
-
- close(idmap_fd);
- return ok();
-fail:
- if (idmap_fd >= 0) {
- close(idmap_fd);
- unlink(idmap_path);
- }
- return error();
-}
-
-binder::Status InstalldNativeService::removeIdmap(const std::string& overlayApkPath) {
- ENFORCE_UID(AID_SYSTEM);
- CHECK_ARGUMENT_PATH(overlayApkPath);
- std::lock_guard<std::recursive_mutex> lock(mLock);
-
- const char* overlay_apk = overlayApkPath.c_str();
- char idmap_path[PATH_MAX];
-
- if (flatten_path(IDMAP_PREFIX, IDMAP_SUFFIX, overlay_apk,
- idmap_path, sizeof(idmap_path)) == -1) {
- ALOGE("idmap cannot generate idmap path for overlay %s\n", overlay_apk);
- return error();
- }
- if (unlink(idmap_path) < 0) {
- ALOGE("couldn't unlink idmap file %s\n", idmap_path);
- return error();
- }
- return ok();
-}
-
binder::Status InstalldNativeService::restoreconAppData(const std::unique_ptr<std::string>& uuid,
const std::string& packageName, int32_t userId, int32_t flags, int32_t appId,
const std::string& seInfo) {
diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h
index 2b7bf33..ef91bf8 100644
--- a/cmds/installd/InstalldNativeService.h
+++ b/cmds/installd/InstalldNativeService.h
@@ -119,9 +119,6 @@
binder::Status destroyProfileSnapshot(const std::string& packageName,
const std::string& profileName);
- binder::Status idmap(const std::string& targetApkPath, const std::string& overlayApkPath,
- int32_t uid);
- binder::Status removeIdmap(const std::string& overlayApkPath);
binder::Status rmPackageDir(const std::string& packageDir);
binder::Status markBootComplete(const std::string& instructionSet);
binder::Status freeCache(const std::unique_ptr<std::string>& uuid, int64_t targetFreeBytes,
diff --git a/cmds/installd/binder/android/os/IInstalld.aidl b/cmds/installd/binder/android/os/IInstalld.aidl
index d99bcc8..6cc4bde 100644
--- a/cmds/installd/binder/android/os/IInstalld.aidl
+++ b/cmds/installd/binder/android/os/IInstalld.aidl
@@ -72,8 +72,6 @@
@utf8InCpp String profileName, @utf8InCpp String classpath);
void destroyProfileSnapshot(@utf8InCpp String packageName, @utf8InCpp String profileName);
- void idmap(@utf8InCpp String targetApkPath, @utf8InCpp String overlayApkPath, int uid);
- void removeIdmap(@utf8InCpp String overlayApkPath);
void rmPackageDir(@utf8InCpp String packageDir);
void markBootComplete(@utf8InCpp String instructionSet);
void freeCache(@nullable @utf8InCpp String uuid, long targetFreeBytes,
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 616c3b2..f95e445 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -339,6 +339,10 @@
? "dalvik.vm.dex2oat-threads"
: "dalvik.vm.boot-dex2oat-threads";
std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s");
+ const char* cpu_set_property = post_bootcomplete
+ ? "dalvik.vm.dex2oat-cpu-set"
+ : "dalvik.vm.boot-dex2oat-cpu-set";
+ std::string dex2oat_cpu_set_arg = MapPropertyToArg(cpu_set_property, "--cpu-set=%s");
std::string bootclasspath;
char* dex2oat_bootclasspath = getenv("DEX2OATBOOTCLASSPATH");
@@ -518,6 +522,7 @@
AddArg(image_block_size_arg);
AddArg(dex2oat_compiler_filter_arg);
AddArg(dex2oat_threads_arg);
+ AddArg(dex2oat_cpu_set_arg);
AddArg(dex2oat_swap_fd);
AddArg(dex2oat_image_fd);
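
Note: the new --cpu-set flag follows the same pattern as the existing -j flag: a system property is mapped into a dex2oat argument and dropped when unset. Below is a minimal standalone sketch of that mapping, assuming android::base::GetProperty/StringPrintf and mirroring what MapPropertyToArg appears to do here; the helper's exact contract is an assumption.

    #include <android-base/properties.h>
    #include <android-base/stringprintf.h>
    #include <string>

    // Returns "--cpu-set=<value>" when the property is set, or an empty string
    // so the caller can skip the argument entirely, e.g. a property value of
    // "0,1,2,3" becomes "--cpu-set=0,1,2,3".
    static std::string CpuSetArg(bool post_bootcomplete) {
        const char* prop = post_bootcomplete ? "dalvik.vm.dex2oat-cpu-set"
                                             : "dalvik.vm.boot-dex2oat-cpu-set";
        const std::string value = android::base::GetProperty(prop, /*default_value=*/"");
        if (value.empty()) return "";
        return android::base::StringPrintf("--cpu-set=%s", value.c_str());
    }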
diff --git a/cmds/installd/migrate_legacy_obb_data.sh b/cmds/installd/migrate_legacy_obb_data.sh
index 0e6d7b9..7399681 100644
--- a/cmds/installd/migrate_legacy_obb_data.sh
+++ b/cmds/installd/migrate_legacy_obb_data.sh
@@ -15,17 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-rm -rf /data/media/Android/obb/test_probe
-mkdir -p /data/media/Android/obb/
-touch /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
+mkdir -p /data/media/0/Android/obb/
+touch /data/media/0/Android/obb/test_probe
if ! test -f /data/media/0/Android/obb/test_probe ; then
log -p i -t migrate_legacy_obb_data "No support for 'unshared_obb'. Not migrating"
- rm -rf /data/media/Android/obb/test_probe
+ rm -rf /data/media/0/Android/obb/test_probe
exit 0
fi
# Delete the test file, and remove the obb folder if it is empty
-rm -rf /data/media/Android/obb/test_probe
+rm -rf /data/media/0/Android/obb/test_probe
rmdir /data/media/obb
if ! test -d /data/media/obb ; then
diff --git a/cmds/installd/otapreopt.cpp b/cmds/installd/otapreopt.cpp
index db36ce3..eefbe4f 100644
--- a/cmds/installd/otapreopt.cpp
+++ b/cmds/installd/otapreopt.cpp
@@ -480,6 +480,10 @@
"-j",
false,
cmd);
+ AddCompilerOptionFromSystemProperty("dalvik.vm.image-dex2oat-cpu-set",
+ "--cpu-set=",
+ false,
+ cmd);
AddCompilerOptionFromSystemProperty(
StringPrintf("dalvik.vm.isa.%s.variant", isa).c_str(),
"--instruction-set-variant=",
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 861401c..141171b 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -68,7 +68,15 @@
}
#endif // !VENDORSERVICEMANAGER
-ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {}
+ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
+#ifndef VENDORSERVICEMANAGER
+ // can process these at any time; don't want to delay the first VINTF client
+ std::thread([] {
+ vintf::VintfObject::GetDeviceHalManifest();
+ vintf::VintfObject::GetFrameworkHalManifest();
+ }).detach();
+#endif // !VENDORSERVICEMANAGER
+}
ServiceManager::~ServiceManager() {
// this should only happen in tests
@@ -306,7 +314,7 @@
if (listeners.empty()) {
*it = mNameToCallback.erase(*it);
} else {
- it++;
+ (*it)++;
}
}
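
Note: the constructor change warms the VINTF manifest caches on a detached background thread so the first client that needs them does not pay the parse cost. A minimal sketch of the same warm-up pattern, with a placeholder loader standing in for VintfObject::GetDeviceHalManifest/GetFrameworkHalManifest:

    #include <thread>

    class Server {
    public:
        Server() {
            // Kick off expensive, idempotent initialization without blocking the
            // constructor; the cache is simply ready sooner for the first caller.
            std::thread([] { warmManifestCache(); }).detach();
        }

    private:
        // Placeholder for the real manifest parsing and caching.
        static void warmManifestCache() {}
    };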
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index 4f47db1..bac8b66 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -280,19 +280,31 @@
std::condition_variable mCv;
};
+ // Simple RAII object to ensure a function is called immediately before going out of scope
+ class Defer {
+ public:
+ Defer(std::function<void()>&& f) : mF(std::move(f)) {}
+ ~Defer() { mF(); }
+ private:
+ std::function<void()> mF;
+ };
+
const std::string name = String8(name16).c_str();
sp<IBinder> out;
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
sp<Waiter> waiter = new Waiter;
if (!mTheRealServiceManager->registerForNotifications(
name, waiter).isOk()) {
return nullptr;
}
+ Defer unregister ([&] {
+ mTheRealServiceManager->unregisterForNotifications(name, waiter);
+ });
while(true) {
{
@@ -316,7 +328,7 @@
if (!mTheRealServiceManager->getService(name, &out).isOk()) {
return nullptr;
}
- if(out != nullptr) return out;
+ if (out != nullptr) return out;
ALOGW("Waited one second for %s", name.c_str());
}
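
Note: the Defer helper exists so unregisterForNotifications runs on every exit path of the wait loop, including the early returns. A generic sketch of the same RAII idiom, independent of the service manager APIs; the registration and step functions are placeholders:

    #include <functional>
    #include <utility>

    // Runs the stored callable when it goes out of scope.
    class Defer {
    public:
        explicit Defer(std::function<void()> f) : mF(std::move(f)) {}
        ~Defer() { mF(); }
        Defer(const Defer&) = delete;
        Defer& operator=(const Defer&) = delete;
    private:
        std::function<void()> mF;
    };

    static void registerSomething() {}    // placeholder
    static void unregisterSomething() {}  // placeholder
    static bool stepOne() { return true; }
    static bool stepTwo() { return true; }

    bool doWork() {
        registerSomething();
        Defer unregister([] { unregisterSomething(); });  // runs on every return below
        if (!stepOne()) return false;
        if (!stepTwo()) return false;
        return true;
    }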
diff --git a/libs/binder/Status.cpp b/libs/binder/Status.cpp
index 0ad99ce..674f065 100644
--- a/libs/binder/Status.cpp
+++ b/libs/binder/Status.cpp
@@ -232,9 +232,10 @@
ret.append("No error");
} else {
ret.appendFormat("Status(%d, %s): '", mException, exceptionToString(mException).c_str());
- if (mException == EX_SERVICE_SPECIFIC ||
- mException == EX_TRANSACTION_FAILED) {
+ if (mException == EX_SERVICE_SPECIFIC) {
ret.appendFormat("%d: ", mErrorCode);
+ } else if (mException == EX_TRANSACTION_FAILED) {
+ ret.appendFormat("%s: ", statusToString(mErrorCode).c_str());
}
ret.append(String8(mMessage));
ret.append("'");
diff --git a/libs/binder/include/binder/IInterface.h b/libs/binder/include/binder/IInterface.h
index 28ffa48..8d72a6b 100644
--- a/libs/binder/include/binder/IInterface.h
+++ b/libs/binder/include/binder/IInterface.h
@@ -38,12 +38,32 @@
// ----------------------------------------------------------------------
+/**
+ * If this is a local object and the descriptor matches, this will return the
+ * actual local object which is implementing the interface. Otherwise, this will
+ * return a proxy to the interface without checking the interface descriptor.
+ * This means that subsequent calls may fail with BAD_TYPE.
+ */
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
+/**
+ * This is the same as interface_cast, except that it always checks to make sure
+ * the descriptor matches, and if it doesn't match, it will return nullptr.
+ */
+template<typename INTERFACE>
+inline sp<INTERFACE> checked_interface_cast(const sp<IBinder>& obj)
+{
+ if (obj->getInterfaceDescriptor() != INTERFACE::descriptor) {
+ return nullptr;
+ }
+
+ return interface_cast<INTERFACE>(obj);
+}
+
// ----------------------------------------------------------------------
template<typename INTERFACE>
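
Note: a hedged usage sketch for the new cast, using a hypothetical AIDL-generated IFoo interface and an assumed service name. interface_cast hands back a proxy even when the remote binder implements a different interface (failing later with BAD_TYPE), while checked_interface_cast compares descriptors up front and returns null on a mismatch.

    #include <binder/IInterface.h>
    #include <binder/IServiceManager.h>
    #include <utils/String16.h>

    using namespace android;

    void lookupExample() {
        // "foo.service" and IFoo are assumptions for illustration only.
        sp<IBinder> binder = defaultServiceManager()->checkService(String16("foo.service"));

        // May be a proxy of the wrong type; the first transaction would fail.
        sp<IFoo> unchecked = interface_cast<IFoo>(binder);

        // Null immediately if the remote descriptor is not IFoo's.
        sp<IFoo> checked = checked_interface_cast<IFoo>(binder);
        if (checked == nullptr) {
            // handle the type mismatch here instead of on the first call
        }
    }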
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder.h b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
index 4d5c044..4560f22 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
@@ -34,6 +34,12 @@
#include <android/binder_status.h>
__BEGIN_DECLS
+
+#ifndef __ANDROID_API__
+#error Android builds must be compiled against a specific API. If this is an \
+ android platform host build, you must use libbinder_ndk_host_user.
+#endif
+
#if __ANDROID_API__ >= 29
// Also see TF_* in kernel's binder.h
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
index f3bc31b..7871667 100644
--- a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
@@ -441,6 +441,42 @@
}
/**
+ * Writes a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * to 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_writeStdVectorParcelableElement<ScopedFileDescriptor>(
+ AParcel* parcel, const void* vectorData, size_t index) {
+ const std::vector<ScopedFileDescriptor>* vector =
+ static_cast<const std::vector<ScopedFileDescriptor>*>(vectorData);
+ int writeFd = vector->at(index).get();
+ if (writeFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ return AParcel_writeParcelFileDescriptor(parcel, writeFd);
+}
+
+/**
+ * Reads a ScopedFileDescriptor object inside a std::vector<ScopedFileDescriptor> at index 'index'
+ * from 'parcel'.
+ */
+template <>
+inline binder_status_t AParcel_readStdVectorParcelableElement<ScopedFileDescriptor>(
+ const AParcel* parcel, void* vectorData, size_t index) {
+ std::vector<ScopedFileDescriptor>* vector =
+ static_cast<std::vector<ScopedFileDescriptor>*>(vectorData);
+ int readFd;
+ binder_status_t status = AParcel_readParcelFileDescriptor(parcel, &readFd);
+ if (status == STATUS_OK) {
+ if (readFd < 0) {
+ return STATUS_UNEXPECTED_NULL;
+ }
+ vector->at(index).set(readFd);
+ }
+ return status;
+}
+
+/**
* Convenience API for writing a std::vector<P>
*/
template <typename P>
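
Note: these specializations let the vector convenience wrappers in this header carry file descriptors, routing each element through AParcel_writeParcelFileDescriptor / AParcel_readParcelFileDescriptor. A small hedged sketch of driving one element directly, assuming the helpers live in namespace ndk as the rest of this header does; normally the wrappers call them for you.

    #include <android/binder_parcel_utils.h>
    #include <vector>

    // Sender side: write the element at index 0; a closed fd in that slot
    // yields STATUS_UNEXPECTED_NULL, as implemented above.
    binder_status_t writeOneFd(AParcel* parcel,
                               const std::vector<ndk::ScopedFileDescriptor>& fds) {
        return ndk::AParcel_writeStdVectorParcelableElement<ndk::ScopedFileDescriptor>(
                parcel, &fds, 0);
    }

    // Receiver side: read it back into a pre-sized vector slot; the descriptor
    // ends up owned by the ScopedFileDescriptor at index 0.
    binder_status_t readOneFd(const AParcel* parcel,
                              std::vector<ndk::ScopedFileDescriptor>* fds) {
        return ndk::AParcel_readStdVectorParcelableElement<ndk::ScopedFileDescriptor>(
                parcel, fds, 0);
    }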
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index db4a36b..94ab9f0 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -1034,9 +1034,9 @@
binder_buffer_object obj {
.hdr = { .type = BINDER_TYPE_PTR },
+ .flags = 0,
.buffer = reinterpret_cast<binder_uintptr_t>((void*)&buf),
.length = 4,
- .flags = 0,
};
data.setDataCapacity(1024);
// Write a bogus object at offset 0 to get an entry in the offset table
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 45fea85..4ee9f55 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -397,7 +397,7 @@
if (deleteMapEntry(gTisMapFd, &key) && errno != ENOENT) return false;
}
- concurrent_val_t czeros = {.policy = {0}, .active = {0}};
+ concurrent_val_t czeros = { .active = {0}, .policy = {0}, };
std::vector<concurrent_val_t> cvals(gNCpus, czeros);
for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
if (writeToMapEntry(gConcurrentMapFd, &key, cvals.data(), BPF_EXIST) && errno != ENOENT)
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 250f902..56b94c1 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -62,6 +62,9 @@
"android.hardware.sensors@1.0::ISensors",
"android.hardware.thermal@2.0::IThermal",
"android.hardware.vr@1.0::IVr",
+ "android.hardware.automotive.audiocontrol@1.0::IAudioControl",
+ "android.hardware.automotive.vehicle@2.0::IVehicle",
+ "android.hardware.automotive.evs@1.0::IEvsCamera",
NULL,
};
diff --git a/libs/gui/ISurfaceComposer.cpp b/libs/gui/ISurfaceComposer.cpp
index 5805797..b9597db 100644
--- a/libs/gui/ISurfaceComposer.cpp
+++ b/libs/gui/ISurfaceComposer.cpp
@@ -978,6 +978,35 @@
}
return NO_ERROR;
}
+
+ virtual status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
+ float lightPosY, float lightPosZ, float lightRadius) {
+ Parcel data, reply;
+ status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
+ if (error != NO_ERROR) {
+ ALOGE("setGlobalShadowSettings: failed to write interface token: %d", error);
+ return error;
+ }
+
+ std::vector<float> shadowConfig = {ambientColor.r, ambientColor.g, ambientColor.b,
+ ambientColor.a, spotColor.r, spotColor.g,
+ spotColor.b, spotColor.a, lightPosY,
+ lightPosZ, lightRadius};
+
+ error = data.writeFloatVector(shadowConfig);
+ if (error != NO_ERROR) {
+ ALOGE("setGlobalShadowSettings: failed to write shadowConfig: %d", error);
+ return error;
+ }
+
+ error = remote()->transact(BnSurfaceComposer::SET_GLOBAL_SHADOW_SETTINGS, data, &reply,
+ IBinder::FLAG_ONEWAY);
+ if (error != NO_ERROR) {
+ ALOGE("setGlobalShadowSettings: failed to transact: %d", error);
+ return error;
+ }
+ return NO_ERROR;
+ }
};
// Out-of-line virtual method definition to trigger vtable emission in this
@@ -1593,6 +1622,25 @@
}
return notifyPowerHint(hintId);
}
+ case SET_GLOBAL_SHADOW_SETTINGS: {
+ CHECK_INTERFACE(ISurfaceComposer, data, reply);
+
+ std::vector<float> shadowConfig;
+ status_t error = data.readFloatVector(&shadowConfig);
+ if (error != NO_ERROR || shadowConfig.size() != 11) {
+ ALOGE("setGlobalShadowSettings: failed to read shadowConfig: %d", error);
+ return error;
+ }
+
+ half4 ambientColor = {shadowConfig[0], shadowConfig[1], shadowConfig[2],
+ shadowConfig[3]};
+ half4 spotColor = {shadowConfig[4], shadowConfig[5], shadowConfig[6], shadowConfig[7]};
+ float lightPosY = shadowConfig[8];
+ float lightPosZ = shadowConfig[9];
+ float lightRadius = shadowConfig[10];
+ return setGlobalShadowSettings(ambientColor, spotColor, lightPosY, lightPosZ,
+ lightRadius);
+ }
default: {
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index a538e14..2ab4d8a 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -1726,6 +1726,14 @@
return ComposerService::getComposerService()->notifyPowerHint(hintId);
}
+status_t SurfaceComposerClient::setGlobalShadowSettings(const half4& ambientColor,
+ const half4& spotColor, float lightPosY,
+ float lightPosZ, float lightRadius) {
+ return ComposerService::getComposerService()->setGlobalShadowSettings(ambientColor, spotColor,
+ lightPosY, lightPosZ,
+ lightRadius);
+}
+
// ----------------------------------------------------------------------------
status_t ScreenshotClient::capture(const sp<IBinder>& display, const ui::Dataspace reqDataSpace,
diff --git a/libs/gui/include/gui/ISurfaceComposer.h b/libs/gui/include/gui/ISurfaceComposer.h
index f2bae98..514dfe2 100644
--- a/libs/gui/include/gui/ISurfaceComposer.h
+++ b/libs/gui/include/gui/ISurfaceComposer.h
@@ -25,6 +25,8 @@
#include <gui/ITransactionCompletedListener.h>
+#include <math/vec4.h>
+
#include <ui/ConfigStoreTypes.h>
#include <ui/DisplayedFrameStats.h>
#include <ui/FrameStats.h>
@@ -439,6 +441,28 @@
* Returns NO_ERROR upon success.
*/
virtual status_t notifyPowerHint(int32_t hintId) = 0;
+
+ /*
+ * Sets the global configuration for all the shadows drawn by SurfaceFlinger. Shadows follow
+ * the material design guidelines.
+ *
+ * ambientColor
+ * Color of the ambient shadow. The alpha is premultiplied.
+ *
+ * spotColor
+ * Color of the spot shadow. The alpha is premultiplied. The position of the spot shadow
+ * depends on the light position.
+ *
+ * lightPosY/lightPosZ
+ * Position of the light used to cast the spot shadow. The X value is always the display
+ * width / 2.
+ *
+ * lightRadius
+ * Radius of the light casting the shadow.
+ */
+ virtual status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
+ float lightPosY, float lightPosZ,
+ float lightRadius) = 0;
};
// ----------------------------------------------------------------------------
@@ -492,6 +516,7 @@
SET_DISPLAY_BRIGHTNESS,
CAPTURE_SCREEN_BY_ID,
NOTIFY_POWER_HINT,
+ SET_GLOBAL_SHADOW_SETTINGS,
// Always append new enum to the end.
};
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index 37387ac..d218356 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -214,6 +214,27 @@
*/
static status_t notifyPowerHint(int32_t hintId);
+ /*
+ * Sets the global configuration for all the shadows drawn by SurfaceFlinger. Shadows follow
+ * the material design guidelines.
+ *
+ * ambientColor
+ * Color of the ambient shadow. The alpha is premultiplied.
+ *
+ * spotColor
+ * Color of the spot shadow. The alpha is premultiplied. The position of the spot shadow
+ * depends on the light position.
+ *
+ * lightPosY/lightPosZ
+ * Position of the light used to cast the spot shadow. The X value is always the display
+ * width / 2.
+ *
+ * lightRadius
+ * Radius of the light casting the shadow.
+ */
+ static status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
+ float lightPosY, float lightPosZ, float lightRadius);
+
// ------------------------------------------------------------------------
// surface creation / destruction
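
Note: a hedged call-site sketch for the new client entry point; the color and light values below are made up for illustration, and the alphas are premultiplied per the parameter documentation above.

    #include <gui/SurfaceComposerClient.h>
    #include <math/vec4.h>

    using android::SurfaceComposerClient;
    using android::half4;
    using android::status_t;

    status_t applyShadowDefaults() {
        // Example values only; see the parameter documentation above.
        const half4 ambientColor{0.0f, 0.0f, 0.0f, 0.039f};  // premultiplied alpha
        const half4 spotColor{0.0f, 0.0f, 0.0f, 0.19f};      // premultiplied alpha
        const float lightPosY = -300.0f;
        const float lightPosZ = 800.0f;
        const float lightRadius = 1600.0f;
        return SurfaceComposerClient::setGlobalShadowSettings(ambientColor, spotColor, lightPosY,
                                                              lightPosZ, lightRadius);
    }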
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index a4fdb35..c4f35ae 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -833,6 +833,12 @@
}
status_t notifyPowerHint(int32_t /*hintId*/) override { return NO_ERROR; }
+ status_t setGlobalShadowSettings(const half4& /*ambientColor*/, const half4& /*spotColor*/,
+ float /*lightPosY*/, float /*lightPosZ*/,
+ float /*lightRadius*/) override {
+ return NO_ERROR;
+ }
+
protected:
IBinder* onAsBinder() override { return nullptr; }
diff --git a/libs/nativedisplay/ADisplay.cpp b/libs/nativedisplay/ADisplay.cpp
index 6665635..6566538 100644
--- a/libs/nativedisplay/ADisplay.cpp
+++ b/libs/nativedisplay/ADisplay.cpp
@@ -18,6 +18,7 @@
#include <gui/SurfaceComposerClient.h>
#include <ui/DisplayInfo.h>
#include <ui/GraphicTypes.h>
+#include <ui/PixelFormat.h>
#include <algorithm>
#include <optional>
@@ -82,6 +83,16 @@
ADisplayType type;
/**
+ * The preferred WCG dataspace
+ */
+ ADataSpace wcgDataspace;
+
+ /**
+ * The preferred WCG pixel format
+ */
+ AHardwareBuffer_Format wcgPixelFormat;
+
+ /**
* Number of supported configs
*/
size_t numConfigs;
@@ -151,6 +162,17 @@
const std::optional<PhysicalDisplayId> internalId =
SurfaceComposerClient::getInternalDisplayId();
+ ui::Dataspace defaultDataspace;
+ ui::PixelFormat defaultPixelFormat;
+ ui::Dataspace wcgDataspace;
+ ui::PixelFormat wcgPixelFormat;
+
+ const status_t status =
+ SurfaceComposerClient::getCompositionPreference(&defaultDataspace, &defaultPixelFormat,
+ &wcgDataspace, &wcgPixelFormat);
+ if (status != NO_ERROR) {
+ return status;
+ }
// Here we allocate all our required memory in one block. The layout is as
// follows:
@@ -176,7 +198,12 @@
const std::vector<DisplayConfigImpl>& configs = configsPerDisplay[i];
memcpy(configData, configs.data(), sizeof(DisplayConfigImpl) * configs.size());
- displayData[i] = DisplayImpl{id, type, configs.size(), configData};
+ displayData[i] = DisplayImpl{id,
+ type,
+ static_cast<ADataSpace>(wcgDataspace),
+ static_cast<AHardwareBuffer_Format>(wcgPixelFormat),
+ configs.size(),
+ configData};
impls[i] = displayData + i;
// Advance the configData pointer so that future configs are written to
// the correct display.
@@ -210,6 +237,17 @@
return reinterpret_cast<DisplayImpl*>(display)->type;
}
+void ADisplay_getPreferredWideColorFormat(ADisplay* display, ADataSpace* outDataspace,
+ AHardwareBuffer_Format* outPixelFormat) {
+ CHECK_NOT_NULL(display);
+ CHECK_NOT_NULL(outDataspace);
+ CHECK_NOT_NULL(outPixelFormat);
+
+ DisplayImpl* impl = reinterpret_cast<DisplayImpl*>(display);
+ *outDataspace = impl->wcgDataspace;
+ *outPixelFormat = impl->wcgPixelFormat;
+}
+
int ADisplay_getCurrentConfig(ADisplay* display, ADisplayConfig** outConfig) {
CHECK_NOT_NULL(display);
diff --git a/libs/nativedisplay/Android.bp b/libs/nativedisplay/Android.bp
index 45b935a..a9b8d66 100644
--- a/libs/nativedisplay/Android.bp
+++ b/libs/nativedisplay/Android.bp
@@ -49,6 +49,7 @@
"libandroidfw",
"libgui",
"liblog",
+ "libnativewindow",
"libui",
"libutils",
],
diff --git a/libs/nativedisplay/include/apex/display.h b/libs/nativedisplay/include/apex/display.h
index 7af452a..9be401e 100644
--- a/libs/nativedisplay/include/apex/display.h
+++ b/libs/nativedisplay/include/apex/display.h
@@ -16,6 +16,8 @@
#pragma once
+#include <android/data_space.h>
+#include <android/hardware_buffer.h>
#include <inttypes.h>
__BEGIN_DECLS
@@ -72,6 +74,12 @@
ADisplayType ADisplay_getDisplayType(ADisplay* display);
/**
+ * Queries the display's preferred WCG format
+ */
+void ADisplay_getPreferredWideColorFormat(ADisplay* display, ADataSpace* outDataspace,
+ AHardwareBuffer_Format* outPixelFormat);
+
+/**
* Gets the current display configuration for the given display.
*
* Memory is *not* allocated for the caller. As such, the returned output
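
Note: a hedged usage sketch for the new query; 'display' is assumed to have been obtained through the existing ADisplay acquisition APIs, and the values named in the comment are illustrative only.

    #include <apex/display.h>

    void logPreferredWideColorFormat(ADisplay* display) {
        ADataSpace dataspace;
        AHardwareBuffer_Format pixelFormat;
        ADisplay_getPreferredWideColorFormat(display, &dataspace, &pixelFormat);
        // A wide-color-capable device might report, for example,
        // ADATASPACE_DISPLAY_P3 with AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT.
    }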
diff --git a/libs/nativewindow/Android.bp b/libs/nativewindow/Android.bp
index 27ab482..55400c7 100644
--- a/libs/nativewindow/Android.bp
+++ b/libs/nativewindow/Android.bp
@@ -85,6 +85,11 @@
export_header_lib_headers: [
"libnativebase_headers",
],
+
+ stubs: {
+ symbol_file: "libnativewindow.map.txt",
+ versions: ["29"],
+ },
}
llndk_library {
diff --git a/libs/nativewindow/libnativewindow.map.txt b/libs/nativewindow/libnativewindow.map.txt
index daf1dcc..f59e8f0 100644
--- a/libs/nativewindow/libnativewindow.map.txt
+++ b/libs/nativewindow/libnativewindow.map.txt
@@ -2,9 +2,9 @@
global:
AHardwareBuffer_acquire;
AHardwareBuffer_allocate;
- AHardwareBuffer_createFromHandle; # llndk
+ AHardwareBuffer_createFromHandle; # llndk # apex
AHardwareBuffer_describe;
- AHardwareBuffer_getNativeHandle; # llndk
+ AHardwareBuffer_getNativeHandle; # llndk # apex
AHardwareBuffer_isSupported; # introduced=29
AHardwareBuffer_lock;
AHardwareBuffer_lockAndGetInfo; # introduced=29
diff --git a/libs/nativewindow/tests/AHardwareBufferTest.cpp b/libs/nativewindow/tests/AHardwareBufferTest.cpp
index cc2731d..71b1f9f 100644
--- a/libs/nativewindow/tests/AHardwareBufferTest.cpp
+++ b/libs/nativewindow/tests/AHardwareBufferTest.cpp
@@ -20,6 +20,7 @@
#include <android/hardware_buffer.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <android/hardware/graphics/common/1.0/types.h>
+#include <vndk/hardware_buffer.h>
#include <gtest/gtest.h>
@@ -100,9 +101,33 @@
(uint64_t)BufferUsage::CPU_WRITE_RARELY,
AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
-EXPECT_TRUE(TestUsageConversion(
+ EXPECT_TRUE(TestUsageConversion(
(uint64_t)BufferUsage::GPU_RENDER_TARGET | (uint64_t)BufferUsage::GPU_TEXTURE |
- 1ull << 29 | 1ull << 57,
+ 1ull << 29 | 1ull << 57,
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_VENDOR_1 | AHARDWAREBUFFER_USAGE_VENDOR_13));
}
+
+TEST(AHardwareBufferTest, GetCreateHandleTest) {
+ AHardwareBuffer_Desc desc{
+ .width = 64,
+ .height = 1,
+ .layers = 1,
+ .format = AHARDWAREBUFFER_FORMAT_BLOB,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+ .stride = 64,
+ };
+
+ AHardwareBuffer* buffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_allocate(&desc, &buffer));
+ const native_handle_t* handle = AHardwareBuffer_getNativeHandle(buffer);
+ EXPECT_NE(nullptr, handle);
+
+ AHardwareBuffer* otherBuffer = nullptr;
+ EXPECT_EQ(0, AHardwareBuffer_createFromHandle(
+ &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &otherBuffer));
+ EXPECT_NE(nullptr, otherBuffer);
+
+ AHardwareBuffer_release(buffer);
+ AHardwareBuffer_release(otherBuffer);
+}
diff --git a/libs/ui/Gralloc4.cpp b/libs/ui/Gralloc4.cpp
index dc105c0..73945cf 100644
--- a/libs/ui/Gralloc4.cpp
+++ b/libs/ui/Gralloc4.cpp
@@ -32,7 +32,6 @@
using android::hardware::graphics::mapper::V4_0::BufferDescriptor;
using android::hardware::graphics::mapper::V4_0::Error;
using android::hardware::graphics::mapper::V4_0::IMapper;
-using android::hardware::graphics::mapper::V4_0::YCbCrLayout;
namespace android {
@@ -190,6 +189,16 @@
status_t Gralloc4Mapper::lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect& bounds,
int acquireFence, void** outData, int32_t* outBytesPerPixel,
int32_t* outBytesPerStride) const {
+ // In Gralloc 4 we can get this info per plane. Clients should check per plane.
+ if (outBytesPerPixel) {
+ // TODO add support to check per plane
+ *outBytesPerPixel = -1;
+ }
+ if (outBytesPerStride) {
+ // TODO add support to check per plane
+ *outBytesPerStride = -1;
+ }
+
auto buffer = const_cast<native_handle_t*>(bufferHandle);
IMapper::Rect accessRegion = sGralloc4Rect(bounds);
@@ -205,19 +214,12 @@
Error error;
auto ret = mMapper->lock(buffer, usage, accessRegion, acquireFenceHandle,
- [&](const auto& tmpError, const auto& tmpData,
- const auto& tmpBytesPerPixel, const auto& tmpBytesPerStride) {
+ [&](const auto& tmpError, const auto& tmpData) {
error = tmpError;
if (error != Error::NONE) {
return;
}
*outData = tmpData;
- if (outBytesPerPixel) {
- *outBytesPerPixel = tmpBytesPerPixel;
- }
- if (outBytesPerStride) {
- *outBytesPerStride = tmpBytesPerStride;
- }
});
// we own acquireFence even on errors
@@ -232,48 +234,11 @@
return static_cast<status_t>(error);
}
-status_t Gralloc4Mapper::lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect& bounds,
- int acquireFence, android_ycbcr* ycbcr) const {
- auto buffer = const_cast<native_handle_t*>(bufferHandle);
-
- IMapper::Rect accessRegion = sGralloc4Rect(bounds);
-
- // put acquireFence in a hidl_handle
- hardware::hidl_handle acquireFenceHandle;
- NATIVE_HANDLE_DECLARE_STORAGE(acquireFenceStorage, 1, 0);
- if (acquireFence >= 0) {
- auto h = native_handle_init(acquireFenceStorage, 1, 0);
- h->data[0] = acquireFence;
- acquireFenceHandle = h;
- }
-
- YCbCrLayout layout;
- Error error;
- auto ret = mMapper->lockYCbCr(buffer, usage, accessRegion, acquireFenceHandle,
- [&](const auto& tmpError, const auto& tmpLayout) {
- error = tmpError;
- if (error != Error::NONE) {
- return;
- }
-
- layout = tmpLayout;
- });
-
- if (error == Error::NONE) {
- ycbcr->y = layout.y;
- ycbcr->cb = layout.cb;
- ycbcr->cr = layout.cr;
- ycbcr->ystride = static_cast<size_t>(layout.yStride);
- ycbcr->cstride = static_cast<size_t>(layout.cStride);
- ycbcr->chroma_step = static_cast<size_t>(layout.chromaStep);
- }
-
- // we own acquireFence even on errors
- if (acquireFence >= 0) {
- close(acquireFence);
- }
-
- return static_cast<status_t>((ret.isOk()) ? error : kTransactionError);
+status_t Gralloc4Mapper::lock(buffer_handle_t /*bufferHandle*/, uint64_t /*usage*/,
+ const Rect& /*bounds*/, int /*acquireFence*/,
+ android_ycbcr* /*ycbcr*/) const {
+ // TODO add lockYCbCr support
+ return static_cast<status_t>(Error::UNSUPPORTED);
}
int Gralloc4Mapper::unlock(buffer_handle_t bufferHandle) const {
diff --git a/libs/vr/libvrflinger/hardware_composer.cpp b/libs/vr/libvrflinger/hardware_composer.cpp
index 67607af..188ac6b 100644
--- a/libs/vr/libvrflinger/hardware_composer.cpp
+++ b/libs/vr/libvrflinger/hardware_composer.cpp
@@ -1201,6 +1201,20 @@
return Void();
}
+Return<void> HardwareComposer::ComposerCallback::onVsync_2_4(
+ Hwc2::Display /*display*/, int64_t /*timestamp*/,
+ Hwc2::VsyncPeriodNanos /*vsyncPeriodNanos*/) {
+ LOG_ALWAYS_FATAL("Unexpected onVsync_2_4 callback");
+ return Void();
+}
+
+Return<void> HardwareComposer::ComposerCallback::onVsyncPeriodTimingChanged(
+ Hwc2::Display /*display*/,
+ const Hwc2::VsyncPeriodChangeTimeline& /*updatedTimeline*/) {
+ LOG_ALWAYS_FATAL("Unexpected onVsyncPeriodTimingChanged callback");
+ return Void();
+}
+
void HardwareComposer::ComposerCallback::SetVsyncService(
const sp<VsyncService>& vsync_service) {
std::lock_guard<std::mutex> lock(mutex_);
diff --git a/libs/vr/libvrflinger/hardware_composer.h b/libs/vr/libvrflinger/hardware_composer.h
index 989ce35..8698814 100644
--- a/libs/vr/libvrflinger/hardware_composer.h
+++ b/libs/vr/libvrflinger/hardware_composer.h
@@ -375,6 +375,12 @@
hardware::Return<void> onRefresh(Hwc2::Display display) override;
hardware::Return<void> onVsync(Hwc2::Display display,
int64_t timestamp) override;
+ hardware::Return<void> onVsync_2_4(
+ Hwc2::Display display, int64_t timestamp,
+ Hwc2::VsyncPeriodNanos vsyncPeriodNanos) override;
+ hardware::Return<void> onVsyncPeriodTimingChanged(
+ Hwc2::Display display,
+ const Hwc2::VsyncPeriodChangeTimeline& updatedTimeline) override;
bool GotFirstHotplug() { return got_first_hotplug_; }
void SetVsyncService(const sp<VsyncService>& vsync_service);
diff --git a/opengl/libs/Android.bp b/opengl/libs/Android.bp
index 48a68af..e255b9d 100644
--- a/opengl/libs/Android.bp
+++ b/opengl/libs/Android.bp
@@ -161,6 +161,10 @@
],
ldflags: ["-Wl,--exclude-libs=ALL,--Bsymbolic-functions"],
export_include_dirs: ["EGL/include"],
+ stubs: {
+ symbol_file: "libEGL.map.txt",
+ versions: ["29"],
+ },
}
cc_test {
diff --git a/services/inputflinger/dispatcher/Entry.cpp b/services/inputflinger/dispatcher/Entry.cpp
index 930c7c7..e925f5b 100644
--- a/services/inputflinger/dispatcher/Entry.cpp
+++ b/services/inputflinger/dispatcher/Entry.cpp
@@ -60,7 +60,7 @@
// --- EventEntry ---
-EventEntry::EventEntry(uint32_t sequenceNum, int32_t type, nsecs_t eventTime, uint32_t policyFlags)
+EventEntry::EventEntry(uint32_t sequenceNum, Type type, nsecs_t eventTime, uint32_t policyFlags)
: sequenceNum(sequenceNum),
refCount(1),
type(type),
@@ -92,7 +92,7 @@
// --- ConfigurationChangedEntry ---
ConfigurationChangedEntry::ConfigurationChangedEntry(uint32_t sequenceNum, nsecs_t eventTime)
- : EventEntry(sequenceNum, TYPE_CONFIGURATION_CHANGED, eventTime, 0) {}
+ : EventEntry(sequenceNum, Type::CONFIGURATION_CHANGED, eventTime, 0) {}
ConfigurationChangedEntry::~ConfigurationChangedEntry() {}
@@ -103,7 +103,7 @@
// --- DeviceResetEntry ---
DeviceResetEntry::DeviceResetEntry(uint32_t sequenceNum, nsecs_t eventTime, int32_t deviceId)
- : EventEntry(sequenceNum, TYPE_DEVICE_RESET, eventTime, 0), deviceId(deviceId) {}
+ : EventEntry(sequenceNum, Type::DEVICE_RESET, eventTime, 0), deviceId(deviceId) {}
DeviceResetEntry::~DeviceResetEntry() {}
@@ -117,7 +117,7 @@
int32_t displayId, uint32_t policyFlags, int32_t action, int32_t flags,
int32_t keyCode, int32_t scanCode, int32_t metaState, int32_t repeatCount,
nsecs_t downTime)
- : EventEntry(sequenceNum, TYPE_KEY, eventTime, policyFlags),
+ : EventEntry(sequenceNum, Type::KEY, eventTime, policyFlags),
deviceId(deviceId),
source(source),
displayId(displayId),
@@ -165,7 +165,7 @@
float xCursorPosition, float yCursorPosition, nsecs_t downTime,
uint32_t pointerCount, const PointerProperties* pointerProperties,
const PointerCoords* pointerCoords, float xOffset, float yOffset)
- : EventEntry(sequenceNum, TYPE_MOTION, eventTime, policyFlags),
+ : EventEntry(sequenceNum, Type::MOTION, eventTime, policyFlags),
eventTime(eventTime),
deviceId(deviceId),
source(source),
diff --git a/services/inputflinger/dispatcher/Entry.h b/services/inputflinger/dispatcher/Entry.h
index 28c2799..9dcaadc 100644
--- a/services/inputflinger/dispatcher/Entry.h
+++ b/services/inputflinger/dispatcher/Entry.h
@@ -33,11 +33,24 @@
constexpr uint32_t SYNTHESIZED_EVENT_SEQUENCE_NUM = 0;
struct EventEntry {
- enum { TYPE_CONFIGURATION_CHANGED, TYPE_DEVICE_RESET, TYPE_KEY, TYPE_MOTION };
+ enum class Type { CONFIGURATION_CHANGED, DEVICE_RESET, KEY, MOTION };
+
+ static const char* typeToString(Type type) {
+ switch (type) {
+ case Type::CONFIGURATION_CHANGED:
+ return "CONFIGURATION_CHANGED";
+ case Type::DEVICE_RESET:
+ return "DEVICE_RESET";
+ case Type::KEY:
+ return "KEY";
+ case Type::MOTION:
+ return "MOTION";
+ }
+ }
uint32_t sequenceNum;
mutable int32_t refCount;
- int32_t type;
+ Type type;
nsecs_t eventTime;
uint32_t policyFlags;
InjectionState* injectionState;
@@ -66,7 +79,7 @@
virtual void appendDescription(std::string& msg) const = 0;
protected:
- EventEntry(uint32_t sequenceNum, int32_t type, nsecs_t eventTime, uint32_t policyFlags);
+ EventEntry(uint32_t sequenceNum, Type type, nsecs_t eventTime, uint32_t policyFlags);
virtual ~EventEntry();
void releaseInjectionState();
};
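
Note: moving EventEntry::type to an enum class is what lets InputDispatcher.cpp below drop its default: branches: once every enumerator is handled explicitly, the compiler's switch-exhaustiveness warning catches a newly added Type at build time instead of leaving a silently unhandled case. A minimal illustration of the pattern:

    enum class Type { CONFIGURATION_CHANGED, DEVICE_RESET, KEY, MOTION };

    const char* describe(Type type) {
        // No default: when a new enumerator is added, the compiler flags this
        // switch as non-exhaustive until the new case is handled.
        switch (type) {
            case Type::CONFIGURATION_CHANGED:
                return "configuration changed";
            case Type::DEVICE_RESET:
                return "device reset";
            case Type::KEY:
                return "key";
            case Type::MOTION:
                return "motion";
        }
    }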
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index 58a5b3c..c219941 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -386,7 +386,7 @@
}
switch (mPendingEvent->type) {
- case EventEntry::TYPE_CONFIGURATION_CHANGED: {
+ case EventEntry::Type::CONFIGURATION_CHANGED: {
ConfigurationChangedEntry* typedEntry =
static_cast<ConfigurationChangedEntry*>(mPendingEvent);
done = dispatchConfigurationChangedLocked(currentTime, typedEntry);
@@ -394,14 +394,14 @@
break;
}
- case EventEntry::TYPE_DEVICE_RESET: {
+ case EventEntry::Type::DEVICE_RESET: {
DeviceResetEntry* typedEntry = static_cast<DeviceResetEntry*>(mPendingEvent);
done = dispatchDeviceResetLocked(currentTime, typedEntry);
dropReason = DropReason::NOT_DROPPED; // device resets are never dropped
break;
}
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
KeyEntry* typedEntry = static_cast<KeyEntry*>(mPendingEvent);
if (isAppSwitchDue) {
if (isAppSwitchKeyEvent(*typedEntry)) {
@@ -421,7 +421,7 @@
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
MotionEntry* typedEntry = static_cast<MotionEntry*>(mPendingEvent);
if (dropReason == DropReason::NOT_DROPPED && isAppSwitchDue) {
dropReason = DropReason::APP_SWITCH;
@@ -435,10 +435,6 @@
done = dispatchMotionLocked(currentTime, typedEntry, &dropReason, nextWakeupTime);
break;
}
-
- default:
- ALOG_ASSERT(false);
- break;
}
if (done) {
@@ -458,7 +454,7 @@
traceInboundQueueLengthLocked();
switch (entry->type) {
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
// Optimize app switch latency.
// If the application takes too long to catch up then we drop all events preceding
// the app switch key.
@@ -480,7 +476,7 @@
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
// Optimize case where the current application is unresponsive and the user
// decides to touch a window in a different application.
// If the application takes too long to catch up then we drop all events preceding
@@ -508,6 +504,11 @@
}
break;
}
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ // nothing to do
+ break;
+ }
}
return needWake;
@@ -627,12 +628,12 @@
}
switch (entry.type) {
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
CancelationOptions options(CancelationOptions::CANCEL_NON_POINTER_EVENTS, reason);
synthesizeCancelationEventsForAllConnectionsLocked(options);
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
const MotionEntry& motionEntry = static_cast<const MotionEntry&>(entry);
if (motionEntry.source & AINPUT_SOURCE_CLASS_POINTER) {
CancelationOptions options(CancelationOptions::CANCEL_POINTER_EVENTS, reason);
@@ -643,6 +644,11 @@
}
break;
}
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ LOG_ALWAYS_FATAL("Should not drop %s events", EventEntry::typeToString(entry.type));
+ break;
+ }
}
}
@@ -1174,18 +1180,19 @@
int32_t InputDispatcher::getTargetDisplayId(const EventEntry& entry) {
int32_t displayId;
switch (entry.type) {
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
const KeyEntry& keyEntry = static_cast<const KeyEntry&>(entry);
displayId = keyEntry.displayId;
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
const MotionEntry& motionEntry = static_cast<const MotionEntry&>(entry);
displayId = motionEntry.displayId;
break;
}
- default: {
- ALOGE("Unsupported event type '%" PRId32 "' for target display.", entry.type);
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ ALOGE("%s events do not have a target display", EventEntry::typeToString(entry.type));
return ADISPLAY_ID_NONE;
}
}
@@ -1849,7 +1856,7 @@
}
// Ensure that the dispatch queues aren't too far backed up for this event.
- if (eventEntry.type == EventEntry::TYPE_KEY) {
+ if (eventEntry.type == EventEntry::Type::KEY) {
// If the event is a key event, then we must wait for all previous events to
// complete before delivering it because previous events may have the
// side-effect of transferring focus to a different window and we want to
@@ -1937,7 +1944,7 @@
int32_t eventType = USER_ACTIVITY_EVENT_OTHER;
switch (eventEntry.type) {
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
const MotionEntry& motionEntry = static_cast<const MotionEntry&>(eventEntry);
if (motionEntry.action == AMOTION_EVENT_ACTION_CANCEL) {
return;
@@ -1948,7 +1955,7 @@
}
break;
}
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
const KeyEntry& keyEntry = static_cast<const KeyEntry&>(eventEntry);
if (keyEntry.flags & AKEY_EVENT_FLAG_CANCELED) {
return;
@@ -1956,6 +1963,12 @@
eventType = USER_ACTIVITY_EVENT_BUTTON;
break;
}
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ LOG_ALWAYS_FATAL("%s events are not user activity",
+ EventEntry::typeToString(eventEntry.type));
+ break;
+ }
}
std::unique_ptr<CommandEntry> commandEntry =
@@ -1996,7 +2009,7 @@
// Split a motion event if needed.
if (inputTarget->flags & InputTarget::FLAG_SPLIT) {
- ALOG_ASSERT(eventEntry->type == EventEntry::TYPE_MOTION);
+ ALOG_ASSERT(eventEntry->type == EventEntry::Type::MOTION);
const MotionEntry& originalMotionEntry = static_cast<const MotionEntry&>(*eventEntry);
if (inputTarget->pointerIds.count() != originalMotionEntry.pointerCount) {
@@ -2080,7 +2093,7 @@
// Apply target flags and update the connection's input state.
switch (eventEntry->type) {
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
const KeyEntry& keyEntry = static_cast<const KeyEntry&>(*eventEntry);
dispatchEntry->resolvedAction = keyEntry.action;
dispatchEntry->resolvedFlags = keyEntry.flags;
@@ -2097,7 +2110,7 @@
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
const MotionEntry& motionEntry = static_cast<const MotionEntry&>(*eventEntry);
if (dispatchMode & InputTarget::FLAG_DISPATCH_AS_OUTSIDE) {
dispatchEntry->resolvedAction = AMOTION_EVENT_ACTION_OUTSIDE;
@@ -2147,6 +2160,12 @@
break;
}
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ LOG_ALWAYS_FATAL("%s events should not go to apps",
+ EventEntry::typeToString(eventEntry->type));
+ break;
+ }
}
// Remember that we are waiting for this dispatch to complete.
@@ -2206,7 +2225,7 @@
status_t status;
EventEntry* eventEntry = dispatchEntry->eventEntry;
switch (eventEntry->type) {
- case EventEntry::TYPE_KEY: {
+ case EventEntry::Type::KEY: {
KeyEntry* keyEntry = static_cast<KeyEntry*>(eventEntry);
// Publish the key event.
@@ -2221,7 +2240,7 @@
break;
}
- case EventEntry::TYPE_MOTION: {
+ case EventEntry::Type::MOTION: {
MotionEntry* motionEntry = static_cast<MotionEntry*>(eventEntry);
PointerCoords scaledCoords[MAX_POINTERS];
@@ -2502,15 +2521,23 @@
for (size_t i = 0; i < cancelationEvents.size(); i++) {
EventEntry* cancelationEventEntry = cancelationEvents[i];
switch (cancelationEventEntry->type) {
- case EventEntry::TYPE_KEY:
+ case EventEntry::Type::KEY: {
logOutboundKeyDetails("cancel - ",
static_cast<const KeyEntry&>(*cancelationEventEntry));
break;
- case EventEntry::TYPE_MOTION:
+ }
+ case EventEntry::Type::MOTION: {
logOutboundMotionDetails("cancel - ",
static_cast<const MotionEntry&>(
*cancelationEventEntry));
break;
+ }
+ case EventEntry::Type::CONFIGURATION_CHANGED:
+ case EventEntry::Type::DEVICE_RESET: {
+ LOG_ALWAYS_FATAL("%s event should not be found inside Connections's queue",
+ EventEntry::typeToString(cancelationEventEntry->type));
+ break;
+ }
}
InputTarget target;
@@ -4237,11 +4264,11 @@
}
bool restartEvent;
- if (dispatchEntry->eventEntry->type == EventEntry::TYPE_KEY) {
+ if (dispatchEntry->eventEntry->type == EventEntry::Type::KEY) {
KeyEntry* keyEntry = static_cast<KeyEntry*>(dispatchEntry->eventEntry);
restartEvent =
afterKeyEventLockedInterruptible(connection, dispatchEntry, keyEntry, handled);
- } else if (dispatchEntry->eventEntry->type == EventEntry::TYPE_MOTION) {
+ } else if (dispatchEntry->eventEntry->type == EventEntry::Type::MOTION) {
MotionEntry* motionEntry = static_cast<MotionEntry*>(dispatchEntry->eventEntry);
restartEvent = afterMotionEventLockedInterruptible(connection, dispatchEntry, motionEntry,
handled);
diff --git a/services/inputflinger/reader/mapper/SwitchInputMapper.cpp b/services/inputflinger/reader/mapper/SwitchInputMapper.cpp
index 4ff941f..16095b9 100644
--- a/services/inputflinger/reader/mapper/SwitchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/SwitchInputMapper.cpp
@@ -56,8 +56,8 @@
void SwitchInputMapper::sync(nsecs_t when) {
if (mUpdatedSwitchMask) {
uint32_t updatedSwitchValues = mSwitchValues & mUpdatedSwitchMask;
- NotifySwitchArgs args(mContext->getNextSequenceNum(), when, 0, updatedSwitchValues,
- mUpdatedSwitchMask);
+ NotifySwitchArgs args(mContext->getNextSequenceNum(), when, 0 /*policyFlags*/,
+ updatedSwitchValues, mUpdatedSwitchMask);
getListener()->notifySwitch(&args);
mUpdatedSwitchMask = 0;
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index 8c1991e..8863ec2 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -365,55 +365,87 @@
class FakeInputReceiver {
public:
- void consumeEvent(int32_t expectedEventType, int32_t expectedDisplayId,
- int32_t expectedFlags = 0) {
+ InputEvent* consume() {
uint32_t consumeSeq;
InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer consume should return OK.";
- ASSERT_TRUE(event != nullptr)
- << mName.c_str() << ": consumer should have returned non-NULL event.";
+ std::chrono::time_point start = std::chrono::steady_clock::now();
+ status_t status = WOULD_BLOCK;
+ while (status == WOULD_BLOCK) {
+ status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1, &consumeSeq,
+ &event);
+ std::chrono::duration elapsed = std::chrono::steady_clock::now() - start;
+ if (elapsed > 100ms) {
+ break;
+ }
+ }
+
+ if (status == WOULD_BLOCK) {
+ // Just means there's no event available.
+ return nullptr;
+ }
+
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer consume should return OK.";
+ return nullptr;
+ }
+ if (event == nullptr) {
+ ADD_FAILURE() << "Consumed correctly, but received NULL event from consumer";
+ return nullptr;
+ }
+
+ status = mConsumer->sendFinishedSignal(consumeSeq, handled());
+ if (status != OK) {
+ ADD_FAILURE() << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ }
+ return event;
+ }
+
+ void consumeEvent(int32_t expectedEventType, int32_t expectedAction, int32_t expectedDisplayId,
+ int32_t expectedFlags) {
+ InputEvent* event = consume();
+
+ ASSERT_NE(nullptr, event) << mName.c_str()
+ << ": consumer should have returned non-NULL event.";
ASSERT_EQ(expectedEventType, event->getType())
<< mName.c_str() << ": event type should match.";
- ASSERT_EQ(expectedDisplayId, event->getDisplayId())
- << mName.c_str() << ": event displayId should be the same as expected.";
+ EXPECT_EQ(expectedDisplayId, event->getDisplayId());
- int32_t flags;
switch (expectedEventType) {
case AINPUT_EVENT_TYPE_KEY: {
- KeyEvent* typedEvent = static_cast<KeyEvent*>(event);
- flags = typedEvent->getFlags();
+ const KeyEvent& keyEvent = static_cast<const KeyEvent&>(*event);
+ EXPECT_EQ(expectedAction, keyEvent.getAction());
+ EXPECT_EQ(expectedFlags, keyEvent.getFlags());
break;
}
case AINPUT_EVENT_TYPE_MOTION: {
- MotionEvent* typedEvent = static_cast<MotionEvent*>(event);
- flags = typedEvent->getFlags();
+ const MotionEvent& motionEvent = static_cast<const MotionEvent&>(*event);
+ EXPECT_EQ(expectedAction, motionEvent.getAction());
+ EXPECT_EQ(expectedFlags, motionEvent.getFlags());
break;
}
default: {
FAIL() << mName.c_str() << ": invalid event type: " << expectedEventType;
}
}
- ASSERT_EQ(expectedFlags, flags)
- << mName.c_str() << ": event flags should be the same as expected.";
+ }
- status = mConsumer->sendFinishedSignal(consumeSeq, handled());
- ASSERT_EQ(OK, status)
- << mName.c_str() << ": consumer sendFinishedSignal should return OK.";
+ void consumeKeyDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
+ }
+
+ void consumeMotionDown(int32_t expectedDisplayId, int32_t expectedFlags = 0) {
+ consumeEvent(AINPUT_EVENT_TYPE_MOTION, AMOTION_EVENT_ACTION_DOWN, expectedDisplayId,
+ expectedFlags);
}
void assertNoEvents() {
- uint32_t consumeSeq;
- InputEvent* event;
- status_t status = mConsumer->consume(&mEventFactory, false /*consumeBatches*/, -1,
- &consumeSeq, &event);
- ASSERT_NE(OK, status)
+ InputEvent* event = consume();
+ ASSERT_EQ(nullptr, event)
<< mName.c_str()
- << ": should not have received any events, so consume(..) should not return OK.";
+ << ": should not have received any events, so consume() should return NULL";
}
protected:
@@ -611,7 +643,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Window should receive motion event.
- window->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ window->consumeMotionDown(ADISPLAY_ID_DEFAULT);
}
// The foreground window should receive the first touch down event.
@@ -632,7 +664,7 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top window should receive the touch down event. Second window should not receive anything.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowTop->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowSecond->assertNoEvents();
}
@@ -658,7 +690,7 @@
// Focused window should receive event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, SetInputWindow_FocusPriority) {
@@ -683,7 +715,7 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
// Top focused window should receive event.
- windowTop->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowTop->consumeKeyDown(ADISPLAY_ID_NONE);
windowSecond->assertNoEvents();
}
@@ -713,7 +745,7 @@
// Top window is invalid, so it should not receive any input event.
windowTop->assertNoEvents();
- windowSecond->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowSecond->consumeKeyDown(ADISPLAY_ID_NONE);
}
TEST_F(InputDispatcherTest, DispatchMouseEventsUnderCursor) {
@@ -738,7 +770,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED,
injectMotionEvent(mDispatcher, AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_MOUSE,
ADISPLAY_ID_DEFAULT, 610, 400, 599, 400));
- windowLeft->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowLeft->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowRight->assertNoEvents();
}
@@ -794,7 +826,7 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test touch down on second display.
@@ -802,29 +834,29 @@
AINPUT_SOURCE_TOUCHSCREEN, SECOND_DISPLAY_ID))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
}
TEST_F(InputDispatcherFocusOnTwoDisplaysTest, SetInputWindow_MultiDisplayFocus) {
// Test inject a key down with display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher, ADISPLAY_ID_DEFAULT))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeKeyDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
// Test inject a key down without display id specified.
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectKeyDown(mDispatcher))
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
// Remove secondary display.
std::vector<sp<InputWindowHandle>> noWindows;
mDispatcher->setInputWindows(noWindows, SECOND_DISPLAY_ID);
// Expect old focus should receive a cancel event.
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE,
- AKEY_EVENT_FLAG_CANCELED);
+ windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, AKEY_EVENT_ACTION_UP, ADISPLAY_ID_NONE,
+ AKEY_EVENT_FLAG_CANCELED);
// Test inject a key down, should timeout because of no target window.
ASSERT_EQ(INPUT_EVENT_INJECTION_TIMED_OUT, injectKeyDown(mDispatcher))
@@ -853,8 +885,8 @@
ASSERT_EQ(INPUT_EVENT_INJECTION_SUCCEEDED, injectMotionDown(mDispatcher,
AINPUT_SOURCE_TOUCHSCREEN, ADISPLAY_ID_DEFAULT))
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
- windowInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
- monitorInPrimary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_DEFAULT);
+ windowInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
+ monitorInPrimary->consumeMotionDown(ADISPLAY_ID_DEFAULT);
windowInSecondary->assertNoEvents();
monitorInSecondary->assertNoEvents();
@@ -864,8 +896,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, SECOND_DISPLAY_ID);
+ windowInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
+ monitorInSecondary->consumeMotionDown(SECOND_DISPLAY_ID);
// Test inject a non-pointer motion event.
// If specific a display, it will dispatch to the focused window of particular display,
@@ -875,8 +907,8 @@
<< "Inject motion event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_MOTION, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeMotionDown(ADISPLAY_ID_NONE);
}
// Test per-display input monitors for key event.
@@ -892,8 +924,8 @@
<< "Inject key event should return INPUT_EVENT_INJECTION_SUCCEEDED";
windowInPrimary->assertNoEvents();
monitorInPrimary->assertNoEvents();
- windowInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
- monitorInSecondary->consumeEvent(AINPUT_EVENT_TYPE_KEY, ADISPLAY_ID_NONE);
+ windowInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
+ monitorInSecondary->consumeKeyDown(ADISPLAY_ID_NONE);
}
class InputFilterTest : public InputDispatcherTest {
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index 91f3ffd..127a3da 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -167,9 +167,12 @@
"Scheduler/LayerInfo.cpp",
"Scheduler/MessageQueue.cpp",
"Scheduler/PhaseOffsets.cpp",
+ "Scheduler/RefreshRateConfigs.cpp",
"Scheduler/Scheduler.cpp",
"Scheduler/SchedulerUtils.cpp",
+ "Scheduler/Timer.cpp",
"Scheduler/VSyncDispatchTimerQueue.cpp",
+ "Scheduler/VSyncPredictor.cpp",
"Scheduler/VSyncModulator.cpp",
"StartPropertySetThread.cpp",
"SurfaceFlinger.cpp",
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index a25709c..94c4a81 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -77,9 +77,9 @@
// with the clone layer trying to use the deleted texture.
mFlinger->deleteTextureAsync(mTextureName);
}
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
void BufferLayer::useSurfaceDamage() {
@@ -286,7 +286,7 @@
return hasReadyFrame();
}
-bool BufferLayer::onPostComposition(const std::optional<DisplayId>& displayId,
+bool BufferLayer::onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) {
@@ -305,8 +305,16 @@
nsecs_t desiredPresentTime = mBufferInfo.mDesiredPresentTime;
mFrameTracker.setDesiredPresentTime(desiredPresentTime);
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->setDesiredTime(layerID, mCurrentFrameNumber, desiredPresentTime);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->setDesiredTime(layerId, mCurrentFrameNumber, desiredPresentTime);
+
+ const auto outputLayer = findOutputLayerForDisplay(displayDevice);
+ if (outputLayer && outputLayer->requiresClientComposition()) {
+ nsecs_t clientCompositionTimestamp = outputLayer->getState().clientCompositionTimestamp;
+ mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
+ clientCompositionTimestamp,
+ FrameTracer::FrameEvent::FALLBACK_COMPOSITION);
+ }
std::shared_ptr<FenceTime> frameReadyFence = mBufferInfo.mFenceTime;
if (frameReadyFence->isValid()) {
@@ -317,17 +325,18 @@
mFrameTracker.setFrameReadyTime(desiredPresentTime);
}
+ const auto displayId = displayDevice->getId();
if (presentFence->isValid()) {
- mFlinger->mTimeStats->setPresentFence(layerID, mCurrentFrameNumber, presentFence);
- mFlinger->mFrameTracer->traceFence(layerID, getCurrentBufferId(), mCurrentFrameNumber,
+ mFlinger->mTimeStats->setPresentFence(layerId, mCurrentFrameNumber, presentFence);
+ mFlinger->mFrameTracer->traceFence(layerId, getCurrentBufferId(), mCurrentFrameNumber,
presentFence, FrameTracer::FrameEvent::PRESENT_FENCE);
mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
} else if (displayId && mFlinger->getHwComposer().isConnected(*displayId)) {
// The HWC doesn't support present fences, so use the refresh
// timestamp instead.
const nsecs_t actualPresentTime = mFlinger->getHwComposer().getRefreshTimestamp(*displayId);
- mFlinger->mTimeStats->setPresentTime(layerID, mCurrentFrameNumber, actualPresentTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, getCurrentBufferId(), mCurrentFrameNumber,
+ mFlinger->mTimeStats->setPresentTime(layerId, mCurrentFrameNumber, actualPresentTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, getCurrentBufferId(), mCurrentFrameNumber,
actualPresentTime,
FrameTracer::FrameEvent::PRESENT_FENCE);
mFrameTracker.setActualPresentTime(actualPresentTime);
diff --git a/services/surfaceflinger/BufferLayer.h b/services/surfaceflinger/BufferLayer.h
index 656ba12..16855d2 100644
--- a/services/surfaceflinger/BufferLayer.h
+++ b/services/surfaceflinger/BufferLayer.h
@@ -78,7 +78,7 @@
bool isHdrY410() const override;
- bool onPostComposition(const std::optional<DisplayId>& displayId,
+ bool onPostComposition(sp<const DisplayDevice> displayDevice,
const std::shared_ptr<FenceTime>& glDoneFence,
const std::shared_ptr<FenceTime>& presentFence,
const CompositorTiming& compositorTiming) override;
diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp
index 6896da7..d51d34b 100644
--- a/services/surfaceflinger/BufferQueueLayer.cpp
+++ b/services/surfaceflinger/BufferQueueLayer.cpp
@@ -223,7 +223,7 @@
// BufferItem's that weren't actually queued. This can happen in shared
// buffer mode.
bool queuedBuffer = false;
- const int32_t layerID = getSequence();
+ const int32_t layerId = getSequence();
LayerRejecter r(mDrawingState, getCurrentState(), recomputeVisibleRegions,
getProducerStickyTransform() != 0, mName, mOverrideScalingMode,
getTransformToDisplayInverse());
@@ -264,7 +264,7 @@
if (queuedBuffer) {
Mutex::Autolock lock(mQueueItemLock);
mConsumer->mergeSurfaceDamage(mQueueItems[0].mSurfaceDamage);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mQueueItems[0].mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mQueueItems[0].mFrameNumber);
mQueueItems.removeAt(0);
mQueuedFrames--;
}
@@ -278,8 +278,8 @@
Mutex::Autolock lock(mQueueItemLock);
mQueueItems.clear();
mQueuedFrames = 0;
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
// Once we have hit this state, the shadow queue may no longer
@@ -301,19 +301,17 @@
// updateTexImage
while (mQueueItems[0].mFrameNumber != currentFrameNumber) {
mConsumer->mergeSurfaceDamage(mQueueItems[0].mSurfaceDamage);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mQueueItems[0].mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mQueueItems[0].mFrameNumber);
mQueueItems.removeAt(0);
mQueuedFrames--;
}
uint64_t bufferID = mQueueItems[0].mGraphicBuffer->getId();
- mFlinger->mTimeStats->setAcquireFence(layerID, currentFrameNumber,
- mQueueItems[0].mFenceTime);
- mFlinger->mFrameTracer->traceFence(layerID, bufferID, currentFrameNumber,
+ mFlinger->mFrameTracer->traceFence(layerId, bufferID, currentFrameNumber,
mQueueItems[0].mFenceTime,
FrameTracer::FrameEvent::ACQUIRE_FENCE);
- mFlinger->mTimeStats->setLatchTime(layerID, currentFrameNumber, latchTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferID, currentFrameNumber, latchTime,
+ mFlinger->mTimeStats->setLatchTime(layerId, currentFrameNumber, latchTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferID, currentFrameNumber, latchTime,
FrameTracer::FrameEvent::LATCH);
mQueueItems.removeAt(0);
@@ -373,28 +371,28 @@
// -----------------------------------------------------------------------
void BufferQueueLayer::onFrameDequeued(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::DEQUEUE);
}
void BufferQueueLayer::onFrameDetached(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::DETACH);
}
void BufferQueueLayer::onFrameCancelled(const uint64_t bufferId) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, FrameTracer::UNSPECIFIED_FRAME_NUMBER,
systemTime(), FrameTracer::FrameEvent::CANCEL);
}
void BufferQueueLayer::onFrameAvailable(const BufferItem& item) {
- const int32_t layerID = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerID, item.mGraphicBuffer->getId(), item.mFrameNumber,
+ const int32_t layerId = getSequence();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, item.mGraphicBuffer->getId(), item.mFrameNumber,
systemTime(), FrameTracer::FrameEvent::QUEUE);
ATRACE_CALL();
diff --git a/services/surfaceflinger/BufferStateLayer.cpp b/services/surfaceflinger/BufferStateLayer.cpp
index 33cd0dc..d68fe8e 100644
--- a/services/surfaceflinger/BufferStateLayer.cpp
+++ b/services/surfaceflinger/BufferStateLayer.cpp
@@ -240,10 +240,10 @@
mCurrentState.modified = true;
setTransactionFlags(eTransactionNeeded);
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->setPostTime(layerID, mFrameNumber, getName().c_str(), postTime);
- mFlinger->mFrameTracer->traceNewLayer(layerID, getName().c_str());
- mFlinger->mFrameTracer->traceTimestamp(layerID, buffer->getId(), mFrameNumber, postTime,
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->setPostTime(layerId, mFrameNumber, getName().c_str(), postTime);
+ mFlinger->mFrameTracer->traceNewLayer(layerId, getName().c_str());
+ mFlinger->mFrameTracer->traceTimestamp(layerId, buffer->getId(), mFrameNumber, postTime,
FrameTracer::FrameEvent::POST);
mCurrentState.desiredPresentTime = desiredPresentTime;
@@ -458,7 +458,7 @@
return NO_ERROR;
}
- const int32_t layerID = getSequence();
+ const int32_t layerId = getSequence();
// Reject if the layer is invalid
uint32_t bufferWidth = s.buffer->width;
@@ -480,7 +480,7 @@
ALOGE("[%s] rejecting buffer: "
"bufferWidth=%d, bufferHeight=%d, front.active.{w=%d, h=%d}",
getDebugName(), bufferWidth, bufferHeight, s.active.w, s.active.h);
- mFlinger->mTimeStats->removeTimeRecord(layerID, mFrameNumber);
+ mFlinger->mTimeStats->removeTimeRecord(layerId, mFrameNumber);
return BAD_VALUE;
}
@@ -497,18 +497,18 @@
// a GL-composited layer) not at all.
status_t err = bindTextureImage();
if (err != NO_ERROR) {
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
return BAD_VALUE;
}
}
const uint64_t bufferID = getCurrentBufferId();
- mFlinger->mTimeStats->setAcquireFence(layerID, mFrameNumber, mBufferInfo.mFenceTime);
- mFlinger->mFrameTracer->traceFence(layerID, bufferID, mFrameNumber, mBufferInfo.mFenceTime,
+ mFlinger->mTimeStats->setAcquireFence(layerId, mFrameNumber, mBufferInfo.mFenceTime);
+ mFlinger->mFrameTracer->traceFence(layerId, bufferID, mFrameNumber, mBufferInfo.mFenceTime,
FrameTracer::FrameEvent::ACQUIRE_FENCE);
- mFlinger->mTimeStats->setLatchTime(layerID, mFrameNumber, latchTime);
- mFlinger->mFrameTracer->traceTimestamp(layerID, bufferID, mFrameNumber, latchTime,
+ mFlinger->mTimeStats->setLatchTime(layerId, mFrameNumber, latchTime);
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferID, mFrameNumber, latchTime,
FrameTracer::FrameEvent::LATCH);
mCurrentStateModified = false;
diff --git a/services/surfaceflinger/ClientCache.cpp b/services/surfaceflinger/ClientCache.cpp
index 16fe27c..a5be01c 100644
--- a/services/surfaceflinger/ClientCache.cpp
+++ b/services/surfaceflinger/ClientCache.cpp
@@ -42,7 +42,7 @@
return false;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
auto bufItr = processBuffers.find(id);
if (bufItr == processBuffers.end()) {
@@ -86,12 +86,14 @@
return false;
}
auto [itr, success] =
- mBuffers.emplace(processToken, std::unordered_map<uint64_t, ClientCacheBuffer>());
+ mBuffers.emplace(processToken,
+ std::make_pair(token,
+ std::unordered_map<uint64_t, ClientCacheBuffer>()));
LOG_ALWAYS_FATAL_IF(!success, "failed to insert new process into client cache");
it = itr;
}
- auto& processBuffers = it->second;
+ auto& processBuffers = it->second.second;
if (processBuffers.size() > BUFFER_CACHE_MAX_SIZE) {
ALOGE("failed to cache buffer: cache is full");
@@ -120,7 +122,7 @@
}
}
- mBuffers[processToken].erase(id);
+ mBuffers[processToken].second.erase(id);
}
for (auto& recipient : pendingErase) {
@@ -180,7 +182,7 @@
return;
}
- for (auto& [id, clientCacheBuffer] : itr->second) {
+ for (auto& [id, clientCacheBuffer] : itr->second.second) {
client_cache_t cacheId = {processToken, id};
for (auto& recipient : clientCacheBuffer.recipients) {
sp<ErasedRecipient> erasedRecipient = recipient.promote();
diff --git a/services/surfaceflinger/ClientCache.h b/services/surfaceflinger/ClientCache.h
index aa6c80d..d7af7c0 100644
--- a/services/surfaceflinger/ClientCache.h
+++ b/services/surfaceflinger/ClientCache.h
@@ -61,7 +61,8 @@
std::set<wp<ErasedRecipient>> recipients;
};
std::map<wp<IBinder> /*caching process*/,
- std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>
+ std::pair<sp<IBinder> /*strong ref to caching process*/,
+ std::unordered_map<uint64_t /*cache id*/, ClientCacheBuffer>>>
mBuffers GUARDED_BY(mMutex);
class CacheDeathRecipient : public IBinder::DeathRecipient {
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
index 1347449..11cfccc 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayerCompositionState.h
@@ -98,6 +98,9 @@
// Debugging
void dump(std::string& result) const;
+
+ // Timestamp for when the layer is queued for client composition
+ nsecs_t clientCompositionTimestamp;
};
} // namespace compositionengine::impl
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index 1953005..6877f8b 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -904,6 +904,7 @@
layerSettings.disableBlending = true;
}
+ layer->editState().clientCompositionTimestamp = systemTime();
clientCompositionLayers.push_back(*result);
}
}
diff --git a/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h b/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h
new file mode 100644
index 0000000..2675dcf
--- /dev/null
+++ b/services/surfaceflinger/CompositionEngine/tests/CallOrderStateMachineHelper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/**
+ * CallOrderStateMachineHelper is a helper class for setting up a compile-time
+ * checked state machine which verifies that a sequence of calls correctly and
+ * completely sets up the state for some other type.
+ *
+ * Two examples where this could be used are with setting up a "Builder" flow
+ * for initializing an instance of some type, and writing tests where the state
+ * machine sets up expectations and preconditions, calls the function under
+ * test, and then evaluates postconditions.
+ *
+ * The purpose of this helper is to offload some of the boilerplate code so the
+ * actual state classes stay simple, and to serve as a place to document how to
+ * go about setting up the state classes.
+ *
+ * To work at compile time, the idea is that each state is a unique C++ type,
+ * and the valid transitions between states are given by member functions on
+ * those types, with those functions returning a simple value type expressing
+ * the new state to use. Illegal state transitions become compile errors because
+ * the corresponding member function simply does not exist.
+ *
+ * Example usage in a test:
+ *
+ * A two-step (+ terminator step) setup process can be defined using:
+ *
+ * class Step1 : public CallOrderStateMachineHelper<TestFixtureType, Step1> {
+ * [[nodiscard]] auto firstMockCalledWith(int value1) {
+ * // Set up an expectation or initial state using the fixture
+ * EXPECT_CALL(getInstance()->firstMock, FirstCall(value1));
+ * return nextState<Step2>();
+ * }
+ * };
+ *
+ * class Step2 : public CallOrderStateMachineHelper<TestFixtureType, Step2> {
+ * [[nodiscard]] auto secondMockCalledWith(int value2) {
+ * // Set up an expectation or initial state using the fixture
+ * EXPECT_CALL(getInstance()->secondMock, SecondCall(value2));
+ * return nextState<StepExecute>();
+ * }
+ * };
+ *
+ * class StepExecute : public CallOrderStateMachineHelper<TestFixtureType, StepExecute> {
+ * void execute() {
+ * invokeFunctionUnderTest();
+ * }
+ * };
+ *
+ * Note how the non-terminator steps return by value and use [[nodiscard]] to
+ * enforce the setup flow. Only the terminator step returns void.
+ *
+ * This can then be used in the tests with:
+ *
+ * Step1::make(this).firstMockCalledWith(value1)
+ * .secondMockCalledWith(value2)
+ * .execute();
+ *
+ * If the test fixture defines a `verify()` helper function which returns
+ * `Step1::make(this)`, this can be simplified to:
+ *
+ * verify().firstMockCalledWith(value1)
+ * .secondMockCalledWith(value2)
+ * .execute();
+ *
+ * This is equivalent to the following calls made by the test function:
+ *
+ * EXPECT_CALL(firstMock, FirstCall(value1));
+ * EXPECT_CALL(secondMock, SecondCall(value2));
+ * invokeFunctionUnderTest();
+ */
+template <typename InstanceType, typename CurrentStateType>
+class CallOrderStateMachineHelper {
+public:
+ CallOrderStateMachineHelper() = default;
+
+ // Disallow copying
+ CallOrderStateMachineHelper(const CallOrderStateMachineHelper&) = delete;
+ CallOrderStateMachineHelper& operator=(const CallOrderStateMachineHelper&) = delete;
+
+ // Moving is the intended use case.
+ CallOrderStateMachineHelper(CallOrderStateMachineHelper&&) = default;
+ CallOrderStateMachineHelper& operator=(CallOrderStateMachineHelper&&) = default;
+
+ // Using a static make() function means the CurrentStateType classes do not
+ // need anything other than a default no-argument constructor.
+ static CurrentStateType make(InstanceType* instance) {
+ auto helper = CurrentStateType();
+ helper.mInstance = instance;
+ return helper;
+ }
+
+ // Each non-terminal state function uses this to transition to the next state.
+ template <typename NextStateType>
+ auto nextState() {
+ // Note: Further operations on the current state become undefined
+ // operations as the instance pointer is moved to the next state type.
+ // But that doesn't stop someone from storing an intermediate state
+ // instance as a local and possibly calling more than one member function
+ // on it. By swapping with nullptr, we can at least try to catch this
+ // at runtime.
+ InstanceType* instance = nullptr;
+ std::swap(instance, mInstance);
+ return NextStateType::make(instance);
+ }
+
+ InstanceType* getInstance() const { return mInstance; }
+
+private:
+ InstanceType* mInstance;
+};
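As an aside for reviewers, here is a minimal sketch (not part of this patch) of the Builder-style use the header comment mentions; the Config, WithHost, and WithPort names are invented purely for illustration:

// Hypothetical builder built on CallOrderStateMachineHelper. The terminator
// state is defined first so the earlier state can name it in nextState<>().
#include <string>
#include <utility>
#include "CallOrderStateMachineHelper.h"

struct Config {
    std::string host;
    int port = 0;
};

struct WithPort : CallOrderStateMachineHelper<Config, WithPort> {
    // Terminator step: returns void, so the chain must end here.
    void port(int p) { getInstance()->port = p; }
};

struct WithHost : CallOrderStateMachineHelper<Config, WithHost> {
    // Non-terminator step: [[nodiscard]] forces the caller to keep chaining.
    [[nodiscard]] auto host(std::string h) {
        getInstance()->host = std::move(h);
        return nextState<WithPort>();
    }
};

// Usage: skipping either step, or calling them out of order, fails to compile.
//   Config config;
//   WithHost::make(&config).host("localhost").port(8080);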
diff --git a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
index c07dfbb..21b9aa9 100644
--- a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
@@ -19,35 +19,6 @@
#include <compositionengine/mock/CompositionEngine.h>
#include <gtest/gtest.h>
-namespace android::hardware::graphics::common::V1_1 {
-
-// Note: These operator overloads need to be defined in the same namespace as
-// the values they print.
-
-std::ostream& operator<<(std::ostream& os, const RenderIntent& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-} // namespace android::hardware::graphics::common::V1_1
-
-namespace android::hardware::graphics::common::V1_2 {
-
-// Note: These operator overloads need to be defined in the same namespace as
-// the values they print.
-
-std::ostream& operator<<(std::ostream& os, const Dataspace& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-std::ostream& operator<<(std::ostream& os, const ColorMode& value) {
- return os << toString(value) << " (" << static_cast<std::underlying_type_t<Dataspace>>(value)
- << ")";
-}
-
-} // namespace android::hardware::graphics::common::V1_2
-
namespace android::compositionengine {
namespace {
diff --git a/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h b/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
index 5cfec77..364661b 100644
--- a/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
+++ b/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
@@ -81,6 +81,11 @@
MOCK_CONST_METHOD1(getColorModes, std::vector<ui::ColorMode>(DisplayId));
MOCK_METHOD3(setActiveColorMode, status_t(DisplayId, ui::ColorMode, ui::RenderIntent));
MOCK_CONST_METHOD0(isUsingVrComposer, bool());
+ MOCK_CONST_METHOD1(isVsyncPeriodSwitchSupported, bool(DisplayId));
+ MOCK_CONST_METHOD1(getDisplayVsyncPeriod, nsecs_t(DisplayId));
+ MOCK_METHOD4(setActiveConfigWithConstraints,
+ status_t(DisplayId, size_t, const HWC2::VsyncPeriodChangeConstraints&,
+ HWC2::VsyncPeriodChangeTimeline*));
MOCK_CONST_METHOD1(dump, void(std::string&));
MOCK_CONST_METHOD0(getComposer, android::Hwc2::Composer*());
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index 7fce520..a9a735a 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -16,6 +16,7 @@
#include <cmath>
+#include <android-base/stringprintf.h>
#include <compositionengine/LayerFECompositionState.h>
#include <compositionengine/impl/Output.h>
#include <compositionengine/impl/OutputCompositionState.h>
@@ -31,6 +32,8 @@
#include <ui/Rect.h>
#include <ui/Region.h>
+#include "CallOrderStateMachineHelper.h"
+#include "MockHWC2.h"
#include "RegionMatcher.h"
#include "TransformMatcher.h"
@@ -38,8 +41,16 @@
namespace {
using testing::_;
+using testing::ByMove;
+using testing::DoAll;
+using testing::Eq;
+using testing::InSequence;
+using testing::Mock;
+using testing::Property;
+using testing::Ref;
using testing::Return;
using testing::ReturnRef;
+using testing::SetArgPointee;
using testing::StrictMock;
constexpr auto TR_IDENT = 0u;
@@ -49,6 +60,34 @@
const mat4 kNonIdentityHalf = mat4() * 0.5;
const mat4 kNonIdentityQuarter = mat4() * 0.25;
+constexpr OutputColorSetting kVendorSpecifiedOutputColorSetting =
+ static_cast<OutputColorSetting>(0x100);
+
+struct OutputPartialMockBase : public impl::Output {
+ // compositionengine::Output overrides
+ const OutputCompositionState& getState() const override { return mState; }
+ OutputCompositionState& editState() override { return mState; }
+
+ // Use mocks for all the remaining virtual functions
+ // not implemented by the base implementation class.
+ MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
+ MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex, compositionengine::OutputLayer*(size_t));
+ MOCK_METHOD3(ensureOutputLayer,
+ compositionengine::OutputLayer*(std::optional<size_t>,
+ const std::shared_ptr<compositionengine::Layer>&,
+ const sp<LayerFE>&));
+ MOCK_METHOD0(finalizePendingOutputLayers, void());
+ MOCK_METHOD0(clearOutputLayers, void());
+ MOCK_CONST_METHOD1(dumpState, void(std::string&));
+ MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
+ MOCK_METHOD2(injectOutputLayerForTest,
+ compositionengine::OutputLayer*(const std::shared_ptr<compositionengine::Layer>&,
+ const sp<LayerFE>&));
+ MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
+
+ impl::OutputCompositionState mState;
+};
+
struct OutputTest : public testing::Test {
class Output : public impl::Output {
public:
@@ -77,8 +116,70 @@
std::shared_ptr<Output> mOutput = createOutput(mCompositionEngine);
};
+// Extension of the base test useful for checking interactions with the LayerFE
+// functions to latch composition state.
+struct OutputLatchFEStateTest : public OutputTest {
+ OutputLatchFEStateTest() {
+ EXPECT_CALL(*mOutputLayer1, getLayer()).WillRepeatedly(ReturnRef(mLayer1));
+ EXPECT_CALL(*mOutputLayer2, getLayer()).WillRepeatedly(ReturnRef(mLayer2));
+ EXPECT_CALL(*mOutputLayer3, getLayer()).WillRepeatedly(ReturnRef(mLayer3));
+
+ EXPECT_CALL(*mOutputLayer1, getLayerFE()).WillRepeatedly(ReturnRef(mLayer1FE));
+ EXPECT_CALL(*mOutputLayer2, getLayerFE()).WillRepeatedly(ReturnRef(mLayer2FE));
+ EXPECT_CALL(*mOutputLayer3, getLayerFE()).WillRepeatedly(ReturnRef(mLayer3FE));
+
+ EXPECT_CALL(mLayer1, editFEState()).WillRepeatedly(ReturnRef(mLayer1FEState));
+ EXPECT_CALL(mLayer2, editFEState()).WillRepeatedly(ReturnRef(mLayer2FEState));
+ EXPECT_CALL(mLayer3, editFEState()).WillRepeatedly(ReturnRef(mLayer3FEState));
+ }
+
+ void injectLayer(std::unique_ptr<mock::OutputLayer> layer) {
+ mOutput->injectOutputLayerForTest(std::unique_ptr<OutputLayer>(layer.release()));
+ }
+
+ std::unique_ptr<mock::OutputLayer> mOutputLayer1{new StrictMock<mock::OutputLayer>};
+ std::unique_ptr<mock::OutputLayer> mOutputLayer2{new StrictMock<mock::OutputLayer>};
+ std::unique_ptr<mock::OutputLayer> mOutputLayer3{new StrictMock<mock::OutputLayer>};
+
+ StrictMock<mock::Layer> mLayer1;
+ StrictMock<mock::Layer> mLayer2;
+ StrictMock<mock::Layer> mLayer3;
+
+ StrictMock<mock::LayerFE> mLayer1FE;
+ StrictMock<mock::LayerFE> mLayer2FE;
+ StrictMock<mock::LayerFE> mLayer3FE;
+
+ LayerFECompositionState mLayer1FEState;
+ LayerFECompositionState mLayer2FEState;
+ LayerFECompositionState mLayer3FEState;
+};
+
const Rect OutputTest::kDefaultDisplaySize{100, 200};
+using ColorProfile = compositionengine::Output::ColorProfile;
+
+void dumpColorProfile(ColorProfile profile, std::string& result, const char* name) {
+ android::base::StringAppendF(&result, "%s (%s[%d] %s[%d] %s[%d] %s[%d]) ", name,
+ toString(profile.mode).c_str(), profile.mode,
+ toString(profile.dataspace).c_str(), profile.dataspace,
+ toString(profile.renderIntent).c_str(), profile.renderIntent,
+ toString(profile.colorSpaceAgnosticDataspace).c_str(),
+ profile.colorSpaceAgnosticDataspace);
+}
+
+// Checks for a ColorProfile match
+MATCHER_P(ColorProfileEq, expected, "") {
+ std::string buf;
+ buf.append("ColorProfiles are not equal\n");
+ dumpColorProfile(expected, buf, "expected value");
+ dumpColorProfile(arg, buf, "actual value");
+ *result_listener << buf;
+
+ return (expected.mode == arg.mode) && (expected.dataspace == arg.dataspace) &&
+ (expected.renderIntent == arg.renderIntent) &&
+ (expected.colorSpaceAgnosticDataspace == arg.colorSpaceAgnosticDataspace);
+}
+
/*
* Basic construction
*/
@@ -268,10 +369,12 @@
}
/*
- * Output::setColorMode
+ * Output::setColorProfile
*/
-TEST_F(OutputTest, setColorModeSetsStateAndDirtiesOutputIfChanged) {
+using OutputSetColorProfileTest = OutputTest;
+
+TEST_F(OutputSetColorProfileTest, setsStateAndDirtiesOutputIfChanged) {
using ColorProfile = Output::ColorProfile;
EXPECT_CALL(*mDisplayColorProfile,
@@ -292,7 +395,7 @@
EXPECT_THAT(mOutput->getState().dirtyRegion, RegionEq(Region(kDefaultDisplaySize)));
}
-TEST_F(OutputTest, setColorModeDoesNothingIfNoChange) {
+TEST_F(OutputSetColorProfileTest, doesNothingIfNoChange) {
using ColorProfile = Output::ColorProfile;
EXPECT_CALL(*mDisplayColorProfile,
@@ -483,26 +586,163 @@
}
/*
+ * Output::setReleasedLayers()
+ */
+
+using OutputSetReleasedLayersTest = OutputTest;
+
+TEST_F(OutputSetReleasedLayersTest, setReleasedLayersTakesGivenLayers) {
+ sp<StrictMock<mock::LayerFE>> layer1FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer2FE{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> layer3FE{new StrictMock<mock::LayerFE>()};
+
+ Output::ReleasedLayers layers;
+ layers.push_back(layer1FE);
+ layers.push_back(layer2FE);
+ layers.push_back(layer3FE);
+
+ mOutput->setReleasedLayers(std::move(layers));
+
+ const auto& setLayers = mOutput->getReleasedLayersForTest();
+ ASSERT_EQ(3u, setLayers.size());
+ ASSERT_EQ(layer1FE.get(), setLayers[0].promote().get());
+ ASSERT_EQ(layer2FE.get(), setLayers[1].promote().get());
+ ASSERT_EQ(layer3FE.get(), setLayers[2].promote().get());
+}
+
+/*
+ * Output::updateLayerStateFromFE()
+ */
+
+using OutputUpdateLayerStateFromFETest = OutputLatchFEStateTest;
+
+TEST_F(OutputUpdateLayerStateFromFETest, handlesNoOutputLayerCase) {
+ CompositionRefreshArgs refreshArgs;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+TEST_F(OutputUpdateLayerStateFromFETest, latchesContentStateForAllContainedLayers) {
+ EXPECT_CALL(mLayer1FE,
+ latchCompositionState(Ref(mLayer1FEState), LayerFE::StateSubset::Content));
+ EXPECT_CALL(mLayer2FE,
+ latchCompositionState(Ref(mLayer2FEState), LayerFE::StateSubset::Content));
+ EXPECT_CALL(mLayer3FE,
+ latchCompositionState(Ref(mLayer3FEState), LayerFE::StateSubset::Content));
+
+ // Note: Must be performed after any expectations on these mocks
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ CompositionRefreshArgs refreshArgs;
+ refreshArgs.updatingGeometryThisFrame = false;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+TEST_F(OutputUpdateLayerStateFromFETest, latchesGeometryAndContentStateForAllContainedLayers) {
+ EXPECT_CALL(mLayer1FE,
+ latchCompositionState(Ref(mLayer1FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+ EXPECT_CALL(mLayer2FE,
+ latchCompositionState(Ref(mLayer2FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+ EXPECT_CALL(mLayer3FE,
+ latchCompositionState(Ref(mLayer3FEState),
+ LayerFE::StateSubset::GeometryAndContent));
+
+ // Note: Must be performed after any expectations on these mocks
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ CompositionRefreshArgs refreshArgs;
+ refreshArgs.updatingGeometryThisFrame = true;
+
+ mOutput->updateLayerStateFromFE(refreshArgs);
+}
+
+/*
* Output::updateAndWriteCompositionState()
*/
-TEST_F(OutputTest, updateAndWriteCompositionState_takesEarlyOutIfNotEnabled) {
- mOutput->editState().isEnabled = false;
+using OutputUpdateAndWriteCompositionStateTest = OutputLatchFEStateTest;
+
+TEST_F(OutputUpdateAndWriteCompositionStateTest, doesNothingIfNoOutputLayers) {
+ mOutput->editState().isEnabled = true;
CompositionRefreshArgs args;
mOutput->updateAndWriteCompositionState(args);
}
-TEST_F(OutputTest, updateAndWriteCompositionState_updatesLayers) {
- mOutput->editState().isEnabled = true;
- mock::OutputLayer* outputLayer = new StrictMock<mock::OutputLayer>();
- mOutput->injectOutputLayerForTest(std::unique_ptr<OutputLayer>(outputLayer));
+TEST_F(OutputUpdateAndWriteCompositionStateTest, doesNothingIfOutputNotEnabled) {
+ mOutput->editState().isEnabled = false;
- EXPECT_CALL(*outputLayer, updateCompositionState(true, true)).Times(1);
- EXPECT_CALL(*outputLayer, writeStateToHWC(true)).Times(1);
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ CompositionRefreshArgs args;
+ mOutput->updateAndWriteCompositionState(args);
+}
+
+TEST_F(OutputUpdateAndWriteCompositionStateTest, updatesLayerContentForAllLayers) {
+ EXPECT_CALL(*mOutputLayer1, updateCompositionState(false, false));
+ EXPECT_CALL(*mOutputLayer1, writeStateToHWC(false));
+ EXPECT_CALL(*mOutputLayer2, updateCompositionState(false, false));
+ EXPECT_CALL(*mOutputLayer2, writeStateToHWC(false));
+ EXPECT_CALL(*mOutputLayer3, updateCompositionState(false, false));
+ EXPECT_CALL(*mOutputLayer3, writeStateToHWC(false));
+
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ mOutput->editState().isEnabled = true;
+
+ CompositionRefreshArgs args;
+ args.updatingGeometryThisFrame = false;
+ args.devOptForceClientComposition = false;
+ mOutput->updateAndWriteCompositionState(args);
+}
+
+TEST_F(OutputUpdateAndWriteCompositionStateTest, updatesLayerGeometryAndContentForAllLayers) {
+ EXPECT_CALL(*mOutputLayer1, updateCompositionState(true, false));
+ EXPECT_CALL(*mOutputLayer1, writeStateToHWC(true));
+ EXPECT_CALL(*mOutputLayer2, updateCompositionState(true, false));
+ EXPECT_CALL(*mOutputLayer2, writeStateToHWC(true));
+ EXPECT_CALL(*mOutputLayer3, updateCompositionState(true, false));
+ EXPECT_CALL(*mOutputLayer3, writeStateToHWC(true));
+
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ mOutput->editState().isEnabled = true;
CompositionRefreshArgs args;
args.updatingGeometryThisFrame = true;
+ args.devOptForceClientComposition = false;
+ mOutput->updateAndWriteCompositionState(args);
+}
+
+TEST_F(OutputUpdateAndWriteCompositionStateTest, forcesClientCompositionForAllLayers) {
+ EXPECT_CALL(*mOutputLayer1, updateCompositionState(false, true));
+ EXPECT_CALL(*mOutputLayer1, writeStateToHWC(false));
+ EXPECT_CALL(*mOutputLayer2, updateCompositionState(false, true));
+ EXPECT_CALL(*mOutputLayer2, writeStateToHWC(false));
+ EXPECT_CALL(*mOutputLayer3, updateCompositionState(false, true));
+ EXPECT_CALL(*mOutputLayer3, writeStateToHWC(false));
+
+ injectLayer(std::move(mOutputLayer1));
+ injectLayer(std::move(mOutputLayer2));
+ injectLayer(std::move(mOutputLayer3));
+
+ mOutput->editState().isEnabled = true;
+
+ CompositionRefreshArgs args;
+ args.updatingGeometryThisFrame = false;
args.devOptForceClientComposition = true;
mOutput->updateAndWriteCompositionState(args);
}
@@ -512,33 +752,10 @@
*/
struct OutputPrepareFrameTest : public testing::Test {
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// Sets up the helper functions called by prepareFrame to use a mock
// implementations.
MOCK_METHOD0(chooseCompositionStrategy, void());
-
- // compositionengine::Output overrides
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
OutputPrepareFrameTest() {
@@ -586,6 +803,1235 @@
}
/*
+ * Output::present()
+ */
+
+struct OutputPresentTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // All child helper functions called by Output::present() are defined as
+ // mocks, and those are tested separately, allowing the present() test to
+ // just cover the high-level flow.
+ MOCK_METHOD1(updateColorProfile, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(updateAndWriteCompositionState,
+ void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(setColorTransform, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD0(beginFrame, void());
+ MOCK_METHOD0(prepareFrame, void());
+ MOCK_METHOD1(devOptRepaintFlash, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD1(finishFrame, void(const compositionengine::CompositionRefreshArgs&));
+ MOCK_METHOD0(postFramebuffer, void());
+ };
+
+ StrictMock<OutputPartialMock> mOutput;
+};
+
+TEST_F(OutputPresentTest, justInvokesChildFunctionsInSequence) {
+ CompositionRefreshArgs args;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, updateColorProfile(Ref(args)));
+ EXPECT_CALL(mOutput, updateAndWriteCompositionState(Ref(args)));
+ EXPECT_CALL(mOutput, setColorTransform(Ref(args)));
+ EXPECT_CALL(mOutput, beginFrame());
+ EXPECT_CALL(mOutput, prepareFrame());
+ EXPECT_CALL(mOutput, devOptRepaintFlash(Ref(args)));
+ EXPECT_CALL(mOutput, finishFrame(Ref(args)));
+ EXPECT_CALL(mOutput, postFramebuffer());
+
+ mOutput.present(args);
+}
+
+/*
+ * Output::updateColorProfile()
+ */
+
+struct OutputUpdateColorProfileTest : public testing::Test {
+ using TestType = OutputUpdateColorProfileTest;
+
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // The child helper functions called by Output::updateColorProfile() are
+ // defined as mocks, and are tested separately, allowing these tests to
+ // just cover the high-level flow.
+ MOCK_METHOD1(setColorProfile, void(const ColorProfile&));
+ };
+
+ struct Layer {
+ Layer() {
+ EXPECT_CALL(mOutputLayer, getLayer()).WillRepeatedly(ReturnRef(mLayer));
+ EXPECT_CALL(mOutputLayer, getLayerFE()).WillRepeatedly(ReturnRef(mLayerFE));
+ EXPECT_CALL(mLayer, getFEState()).WillRepeatedly(ReturnRef(mLayerFEState));
+ }
+
+ StrictMock<mock::OutputLayer> mOutputLayer;
+ StrictMock<mock::Layer> mLayer;
+ StrictMock<mock::LayerFE> mLayerFE;
+ LayerFECompositionState mLayerFEState;
+ };
+
+ OutputUpdateColorProfileTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0))
+ .WillRepeatedly(Return(&mLayer1.mOutputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(1))
+ .WillRepeatedly(Return(&mLayer2.mOutputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(2))
+ .WillRepeatedly(Return(&mLayer3.mOutputLayer));
+ }
+
+ struct ExecuteState : public CallOrderStateMachineHelper<TestType, ExecuteState> {
+ void execute() { getInstance()->mOutput.updateColorProfile(getInstance()->mRefreshArgs); }
+ };
+
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ StrictMock<OutputPartialMock> mOutput;
+
+ Layer mLayer1;
+ Layer mLayer2;
+ Layer mLayer3;
+
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+// TODO(b/144522012): Refactor Output::updateColorProfile and the related code
+// to make it easier to write unit tests.
+
+TEST_F(OutputUpdateColorProfileTest, setsAColorProfileWhenUnmanaged) {
+ // When the outputColorSetting is set to kUnmanaged, the implementation sets
+ // a simple default color profile without looking at anything else.
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(3));
+ EXPECT_CALL(mOutput,
+ setColorProfile(ColorProfileEq(
+ ColorProfile{ui::ColorMode::NATIVE, ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC, ui::Dataspace::UNKNOWN})));
+
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kUnmanaged;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ mOutput.updateColorProfile(mRefreshArgs);
+}
+
+struct OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile
+ : public OutputUpdateColorProfileTest {
+ OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile() {
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(0));
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ }
+
+ struct ExpectBestColorModeCallResultUsedToSetColorProfileState
+ : public CallOrderStateMachineHelper<
+ TestType, ExpectBestColorModeCallResultUsedToSetColorProfileState> {
+ [[nodiscard]] auto expectBestColorModeCallResultUsedToSetColorProfile(
+ ui::ColorMode colorMode, ui::Dataspace dataspace, ui::RenderIntent renderIntent) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(ui::Dataspace::V0_SRGB, ui::RenderIntent::ENHANCE, _, _,
+ _))
+ .WillOnce(DoAll(SetArgPointee<2>(dataspace), SetArgPointee<3>(colorMode),
+ SetArgPointee<4>(renderIntent)));
+ EXPECT_CALL(getInstance()->mOutput,
+ setColorProfile(
+ ColorProfileEq(ColorProfile{colorMode, dataspace, renderIntent,
+ ui::Dataspace::UNKNOWN})));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() {
+ return ExpectBestColorModeCallResultUsedToSetColorProfileState::make(this);
+ }
+};
+
+TEST_F(OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile,
+ Native_Unknown_Colorimetric_Set) {
+ verify().expectBestColorModeCallResultUsedToSetColorProfile(ui::ColorMode::NATIVE,
+ ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_GetBestColorModeResultBecomesSetProfile,
+ DisplayP3_DisplayP3_Enhance_Set) {
+ verify().expectBestColorModeCallResultUsedToSetColorProfile(ui::ColorMode::DISPLAY_P3,
+ ui::Dataspace::DISPLAY_P3,
+ ui::RenderIntent::ENHANCE)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_ColorSpaceAgnosticeDataspaceAffectsSetColorProfile
+ : public OutputUpdateColorProfileTest {
+ OutputUpdateColorProfileTest_ColorSpaceAgnosticeDataspaceAffectsSetColorProfile() {
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(0));
+ EXPECT_CALL(*mDisplayColorProfile,
+ getBestColorMode(ui::Dataspace::V0_SRGB, ui::RenderIntent::ENHANCE, _, _, _))
+ .WillRepeatedly(DoAll(SetArgPointee<2>(ui::Dataspace::UNKNOWN),
+ SetArgPointee<3>(ui::ColorMode::NATIVE),
+ SetArgPointee<4>(ui::RenderIntent::COLORIMETRIC)));
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ }
+
+ struct IfColorSpaceAgnosticDataspaceSetToState
+ : public CallOrderStateMachineHelper<TestType, IfColorSpaceAgnosticDataspaceSetToState> {
+ [[nodiscard]] auto ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace dataspace) {
+ getInstance()->mRefreshArgs.colorSpaceAgnosticDataspace = dataspace;
+ return nextState<ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState>();
+ }
+ };
+
+ struct ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState
+ : public CallOrderStateMachineHelper<
+ TestType, ThenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspaceState> {
+ [[nodiscard]] auto thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(
+ ui::Dataspace dataspace) {
+ EXPECT_CALL(getInstance()->mOutput,
+ setColorProfile(ColorProfileEq(
+ ColorProfile{ui::ColorMode::NATIVE, ui::Dataspace::UNKNOWN,
+ ui::RenderIntent::COLORIMETRIC, dataspace})));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfColorSpaceAgnosticDataspaceSetToState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_ColorSpaceAgnosticeDataspaceAffectsSetColorProfile, DisplayP3) {
+ verify().ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace::DISPLAY_P3)
+ .thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ColorSpaceAgnosticeDataspaceAffectsSetColorProfile, V0_SRGB) {
+ verify().ifColorSpaceAgnosticDataspaceSetTo(ui::Dataspace::V0_SRGB)
+ .thenExpectSetColorProfileCallUsesColorSpaceAgnosticDataspace(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference
+ : public OutputUpdateColorProfileTest {
+ // Internally the implementation looks through the dataspaces of all the
+ // visible layers. The topmost one that also has an actual dataspace
+ // preference set is used to drive subsequent choices.
+
+ OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(3));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ struct IfTopLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, IfTopLayerDataspaceState> {
+ [[nodiscard]] auto ifTopLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer3.mLayerFEState.dataspace = dataspace;
+ return nextState<AndIfMiddleLayerDataspaceState>();
+ }
+ [[nodiscard]] auto ifTopLayerHasNoPreference() {
+ return ifTopLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct AndIfMiddleLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfMiddleLayerDataspaceState> {
+ [[nodiscard]] auto andIfMiddleLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer2.mLayerFEState.dataspace = dataspace;
+ return nextState<AndIfBottomLayerDataspaceState>();
+ }
+ [[nodiscard]] auto andIfMiddleLayerHasNoPreference() {
+ return andIfMiddleLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct AndIfBottomLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfBottomLayerDataspaceState> {
+ [[nodiscard]] auto andIfBottomLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ [[nodiscard]] auto andIfBottomLayerHasNoPreference() {
+ return andIfBottomLayerIs(ui::Dataspace::UNKNOWN);
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfTopLayerDataspaceState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ noStrongLayerPrefenceUses_V0_SRGB) {
+ // If none of the layers indicate a preference, then V0_SRGB is the
+ // preferred choice (subject to additional checks).
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifTopmostUses_DisplayP3_Then_DisplayP3_Chosen) {
+ // If only the topmost layer has a preference, then that is what is chosen.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifMiddleUses_DisplayP3_Then_DisplayP3_Chosen) {
+ // If only the middle layer has a preference, then that is what is chosen.
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfBottomLayerHasNoPreference()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifBottomUses_DisplayP3_Then_DisplayP3_Chosen) {
+ // If only the bottom layer has a preference, then that is what is chosen.
+ verify().ifTopLayerHasNoPreference()
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifTopUses_DisplayBT2020_AndBottomUses_DisplayP3_Then_DisplayBT2020_Chosen) {
+ // If multiple layers have a preference, the topmost value is what is used.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_BT2020)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_BT2020)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_TopmostLayerPreferenceSetsOutputPreference,
+ ifTopUses_DisplayP3_AndBottomUses_V0_SRGB_Then_DisplayP3_Chosen) {
+ // If multiple layers have a preference, the topmost value is what is used.
+ verify().ifTopLayerIs(ui::Dataspace::DISPLAY_P3)
+ .andIfMiddleLayerHasNoPreference()
+ .andIfBottomLayerIs(ui::Dataspace::DISPLAY_BT2020)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+struct OutputUpdateColorProfileTest_ForceOutputColorOverrides
+ : public OutputUpdateColorProfileTest {
+ // If CompositionRefreshArgs::forceOutputColorMode is set to some specific
+ // values, it overrides the layer dataspace choice.
+
+ OutputUpdateColorProfileTest_ForceOutputColorOverrides() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+
+ mLayer1.mLayerFEState.dataspace = ui::Dataspace::DISPLAY_BT2020;
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ struct IfForceOutputColorModeState
+ : public CallOrderStateMachineHelper<TestType, IfForceOutputColorModeState> {
+ [[nodiscard]] auto ifForceOutputColorMode(ui::ColorMode colorMode) {
+ getInstance()->mRefreshArgs.forceOutputColorMode = colorMode;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ [[nodiscard]] auto ifNoOverride() { return ifForceOutputColorMode(ui::ColorMode::NATIVE); }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfForceOutputColorModeState::make(this); }
+};
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, NoOverride_DoesNotOverride) {
+ // By default the layer state is used to set the preferred dataspace
+ verify().ifNoOverride()
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_BT2020)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, SRGB_Override_USES_V0_SRGB) {
+ // Setting ui::ColorMode::SRGB overrides it with ui::Dataspace::V0_SRGB
+ verify().ifForceOutputColorMode(ui::ColorMode::SRGB)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::V0_SRGB)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_ForceOutputColorOverrides, DisplayP3_Override_Uses_DisplayP3) {
+ // Setting ui::ColorMode::DISPLAY_P3 overrides it with ui::Dataspace::DISPLAY_P3
+ verify().ifForceOutputColorMode(ui::ColorMode::DISPLAY_P3)
+ .thenExpectBestColorModeCallUses(ui::Dataspace::DISPLAY_P3)
+ .execute();
+}
+
+// HDR output requires all layers to be compatible with the chosen HDR
+// dataspace, and also requires proper HDR support from the display.
+struct OutputUpdateColorProfileTest_Hdr : public OutputUpdateColorProfileTest {
+ OutputUpdateColorProfileTest_Hdr() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ }
+
+ static constexpr ui::Dataspace kNonHdrDataspace = ui::Dataspace::DISPLAY_P3;
+ static constexpr ui::Dataspace BT2020_PQ = ui::Dataspace::BT2020_PQ;
+ static constexpr ui::Dataspace BT2020_HLG = ui::Dataspace::BT2020_HLG;
+ static constexpr ui::Dataspace DISPLAY_P3 = ui::Dataspace::DISPLAY_P3;
+
+ struct IfTopLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, IfTopLayerDataspaceState> {
+ [[nodiscard]] auto ifTopLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer2.mLayerFEState.dataspace = dataspace;
+ return nextState<AndTopLayerCompositionTypeState>();
+ }
+ [[nodiscard]] auto ifTopLayerIsNotHdr() { return ifTopLayerIs(kNonHdrDataspace); }
+ };
+
+ struct AndTopLayerCompositionTypeState
+ : public CallOrderStateMachineHelper<TestType, AndTopLayerCompositionTypeState> {
+ [[nodiscard]] auto andTopLayerIsREComposed(bool renderEngineComposed) {
+ getInstance()->mLayer2.mLayerFEState.forceClientComposition = renderEngineComposed;
+ return nextState<AndIfBottomLayerDataspaceState>();
+ }
+ };
+
+ struct AndIfBottomLayerDataspaceState
+ : public CallOrderStateMachineHelper<TestType, AndIfBottomLayerDataspaceState> {
+ [[nodiscard]] auto andIfBottomLayerIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<AndBottomLayerCompositionTypeState>();
+ }
+ [[nodiscard]] auto andIfBottomLayerIsNotHdr() {
+ return andIfBottomLayerIs(kNonHdrDataspace);
+ }
+ };
+
+ struct AndBottomLayerCompositionTypeState
+ : public CallOrderStateMachineHelper<TestType, AndBottomLayerCompositionTypeState> {
+ [[nodiscard]] auto andBottomLayerIsREComposed(bool renderEngineComposed) {
+ getInstance()->mLayer1.mLayerFEState.forceClientComposition = renderEngineComposed;
+ return nextState<AndIfHasLegacySupportState>();
+ }
+ };
+
+ struct AndIfHasLegacySupportState
+ : public CallOrderStateMachineHelper<TestType, AndIfHasLegacySupportState> {
+ [[nodiscard]] auto andIfLegacySupportFor(ui::Dataspace dataspace, bool legacySupport) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile, hasLegacyHdrSupport(dataspace))
+ .WillOnce(Return(legacySupport));
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::Dataspace dataspace) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(dataspace, _, _, _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Call this member function to start using the mini-DSL defined above.
+ [[nodiscard]] auto verify() { return IfTopLayerDataspaceState::make(this); }
+};
+
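+// The tests below walk through combinations of top/bottom layer dataspace,
+// composition type (HWC vs RenderEngine), and legacy HDR support, and check
+// which dataspace is handed to getBestColorMode().
+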
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_HW_Uses_PQ) {
+ // If all layers use BT2020_PQ, and there are no other special conditions,
+ // BT2020_PQ is used.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_PQ_RE_Uses_PQ) {
+ // BT2020_PQ is still used if the bottom layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_RE_On_PQ_HW_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the top layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_HW_Uses_PQ) {
+ // If there is mixed HLG/PQ use, and the topmost layer is PQ, then PQ is used if there
+ // are no other special conditions.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_HLG_RE_Uses_PQ) {
+ // BT2020_PQ is used if the bottom HLG layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_RE_On_HLG_HW_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the top PQ layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_HW_Uses_PQ) {
+ // If there is mixed HLG/PQ use, and the topmost layer is HLG, then PQ is
+ // used if there are no other special conditions.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_PQ is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_PQ_RE_Uses_DisplayP3) {
+ // BT2020_PQ is not used if the bottom PQ layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_RE_On_PQ_HW_Uses_PQ) {
+ // BT2020_PQ is still used if the top HLG layer is RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_PQ)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_HW_Uses_HLG) {
+ // If all layers use HLG then HLG is used if there are no other special
+ // conditions.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_HW_IfPQHasLegacySupport_Uses_DisplayP3) {
+ // BT2020_HLG is not used if there is only legacy support for it.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, true)
+ .thenExpectBestColorModeCallUses(DISPLAY_P3)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_HLG_RE_Uses_HLG) {
+ // BT2020_HLG is used even if the bottom layer is client composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_RE_On_HLG_HW_Uses_HLG) {
+ // BT2020_HLG is used even if the top layer is client composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(true)
+ .andIfBottomLayerIs(BT2020_HLG)
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, PQ_HW_On_NonHdr_HW_Uses_PQ) {
+ // Even if there are non-HDR layers present, BT2020_PQ can still be used.
+ verify().ifTopLayerIs(BT2020_PQ)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIsNotHdr()
+ .andBottomLayerIsREComposed(false)
+ .andIfLegacySupportFor(BT2020_PQ, false)
+ .thenExpectBestColorModeCallUses(BT2020_PQ)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfileTest_Hdr, HLG_HW_On_NonHdr_RE_Uses_HLG) {
+ // BT2020_HLG is still used even if the bottom layer is not HDR and is
+ // RenderEngine composed.
+ verify().ifTopLayerIs(BT2020_HLG)
+ .andTopLayerIsREComposed(false)
+ .andIfBottomLayerIsNotHdr()
+ .andBottomLayerIsREComposed(true)
+ .andIfLegacySupportFor(BT2020_HLG, false)
+ .thenExpectBestColorModeCallUses(BT2020_HLG)
+ .execute();
+}
+
+struct OutputUpdateColorProfile_AffectsChosenRenderIntentTest
+ : public OutputUpdateColorProfileTest {
+ // The various values for CompositionRefreshArgs::outputColorSetting affect
+ // the chosen renderIntent, along with whether the preferred dataspace is an
+ // HDR dataspace or not.
+
+ OutputUpdateColorProfile_AffectsChosenRenderIntentTest() {
+ mRefreshArgs.outputColorSetting = OutputColorSetting::kEnhanced;
+ mRefreshArgs.colorSpaceAgnosticDataspace = ui::Dataspace::UNKNOWN;
+ mLayer1.mLayerFEState.dataspace = ui::Dataspace::BT2020_PQ;
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mOutput, setColorProfile(_)).WillRepeatedly(Return());
+ EXPECT_CALL(*mDisplayColorProfile, hasLegacyHdrSupport(ui::Dataspace::BT2020_PQ))
+ .WillRepeatedly(Return(false));
+ }
+
+ // The tests here involve enough state and GMock setup that using a mini-DSL
+ // makes the tests much more readable, and allows the test to focus more on
+ // the intent than on some of the details.
+
+ static constexpr ui::Dataspace kNonHdrDataspace = ui::Dataspace::DISPLAY_P3;
+ static constexpr ui::Dataspace kHdrDataspace = ui::Dataspace::BT2020_PQ;
+
+ struct IfDataspaceChosenState
+ : public CallOrderStateMachineHelper<TestType, IfDataspaceChosenState> {
+ [[nodiscard]] auto ifDataspaceChosenIs(ui::Dataspace dataspace) {
+ getInstance()->mLayer1.mLayerFEState.dataspace = dataspace;
+ return nextState<AndOutputColorSettingState>();
+ }
+ [[nodiscard]] auto ifDataspaceChosenIsNonHdr() {
+ return ifDataspaceChosenIs(kNonHdrDataspace);
+ }
+ [[nodiscard]] auto ifDataspaceChosenIsHdr() { return ifDataspaceChosenIs(kHdrDataspace); }
+ };
+
+ struct AndOutputColorSettingState
+ : public CallOrderStateMachineHelper<TestType, AndOutputColorSettingState> {
+ [[nodiscard]] auto andOutputColorSettingIs(OutputColorSetting setting) {
+ getInstance()->mRefreshArgs.outputColorSetting = setting;
+ return nextState<ThenExpectBestColorModeCallUsesState>();
+ }
+ };
+
+ struct ThenExpectBestColorModeCallUsesState
+ : public CallOrderStateMachineHelper<TestType, ThenExpectBestColorModeCallUsesState> {
+ [[nodiscard]] auto thenExpectBestColorModeCallUses(ui::RenderIntent intent) {
+ EXPECT_CALL(*getInstance()->mDisplayColorProfile,
+ getBestColorMode(getInstance()->mLayer1.mLayerFEState.dataspace, intent, _,
+ _, _));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ // Tests call this helper member function to start using the mini-DSL
+ // defined above.
+ [[nodiscard]] auto verify() { return IfDataspaceChosenState::make(this); }
+};
+
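+// The tests below cover the expected mapping: kManaged prefers COLORIMETRIC
+// (TONE_MAP_COLORIMETRIC for an HDR dataspace), kEnhanced prefers ENHANCE
+// (TONE_MAP_ENHANCE for HDR), and a vendor-specified setting is passed
+// straight through (cast to a render intent).
+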
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Managed_NonHdr_Prefers_Colorimetric) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kManaged)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Managed_Hdr_Prefers_ToneMapColorimetric) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kManaged)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::TONE_MAP_COLORIMETRIC)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Enhanced_NonHdr_Prefers_Enhance) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kEnhanced)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::ENHANCE)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest,
+ Enhanced_Hdr_Prefers_ToneMapEnhance) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(OutputColorSetting::kEnhanced)
+ .thenExpectBestColorModeCallUses(ui::RenderIntent::TONE_MAP_ENHANCE)
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Vendor_NonHdr_Prefers_Vendor) {
+ verify().ifDataspaceChosenIsNonHdr()
+ .andOutputColorSettingIs(kVendorSpecifiedOutputColorSetting)
+ .thenExpectBestColorModeCallUses(
+ static_cast<ui::RenderIntent>(kVendorSpecifiedOutputColorSetting))
+ .execute();
+}
+
+TEST_F(OutputUpdateColorProfile_AffectsChosenRenderIntentTest, Vendor_Hdr_Prefers_Vendor) {
+ verify().ifDataspaceChosenIsHdr()
+ .andOutputColorSettingIs(kVendorSpecifiedOutputColorSetting)
+ .thenExpectBestColorModeCallUses(
+ static_cast<ui::RenderIntent>(kVendorSpecifiedOutputColorSetting))
+ .execute();
+}
+
+/*
+ * Output::beginFrame()
+ */
+
+struct OutputBeginFrameTest : public ::testing::Test {
+ using TestType = OutputBeginFrameTest;
+
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // Sets up the helper functions called by beginFrame to use mock
+ // implementations.
+ MOCK_CONST_METHOD1(getDirtyRegion, Region(bool));
+ };
+
+ OutputBeginFrameTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+ }
+
+ struct IfGetDirtyRegionExpectationState
+ : public CallOrderStateMachineHelper<TestType, IfGetDirtyRegionExpectationState> {
+ [[nodiscard]] auto ifGetDirtyRegionReturns(Region dirtyRegion) {
+ EXPECT_CALL(getInstance()->mOutput, getDirtyRegion(false))
+ .WillOnce(Return(dirtyRegion));
+ return nextState<AndIfGetOutputLayerCountExpectationState>();
+ }
+ };
+
+ struct AndIfGetOutputLayerCountExpectationState
+ : public CallOrderStateMachineHelper<TestType, AndIfGetOutputLayerCountExpectationState> {
+ [[nodiscard]] auto andIfGetOutputLayerCountReturns(size_t layerCount) {
+ EXPECT_CALL(getInstance()->mOutput, getOutputLayerCount()).WillOnce(Return(layerCount));
+ return nextState<AndIfLastCompositionHadVisibleLayersState>();
+ }
+ };
+
+ struct AndIfLastCompositionHadVisibleLayersState
+ : public CallOrderStateMachineHelper<TestType,
+ AndIfLastCompositionHadVisibleLayersState> {
+ [[nodiscard]] auto andIfLastCompositionHadVisibleLayersIs(bool hadOutputLayers) {
+ getInstance()->mOutput.mState.lastCompositionHadVisibleLayers = hadOutputLayers;
+ return nextState<ThenExpectRenderSurfaceBeginFrameCallState>();
+ }
+ };
+
+ struct ThenExpectRenderSurfaceBeginFrameCallState
+ : public CallOrderStateMachineHelper<TestType,
+ ThenExpectRenderSurfaceBeginFrameCallState> {
+ [[nodiscard]] auto thenExpectRenderSurfaceBeginFrameCall(bool mustRecompose) {
+ EXPECT_CALL(*getInstance()->mRenderSurface, beginFrame(mustRecompose));
+ return nextState<ExecuteState>();
+ }
+ };
+
+ struct ExecuteState : public CallOrderStateMachineHelper<TestType, ExecuteState> {
+ [[nodiscard]] auto execute() {
+ getInstance()->mOutput.beginFrame();
+ return nextState<CheckPostconditionHadVisibleLayersState>();
+ }
+ };
+
+ struct CheckPostconditionHadVisibleLayersState
+ : public CallOrderStateMachineHelper<TestType, CheckPostconditionHadVisibleLayersState> {
+ void checkPostconditionHadVisibleLayers(bool expected) {
+ EXPECT_EQ(expected, getInstance()->mOutput.mState.lastCompositionHadVisibleLayers);
+ }
+ };
+
+ // Tests call this helper member function to start using the mini-DSL
+ // defined above.
+ [[nodiscard]] auto verify() { return IfGetDirtyRegionExpectationState::make(this); }
+
+ static const Region kEmptyRegion;
+ static const Region kNotEmptyRegion;
+
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ StrictMock<OutputPartialMock> mOutput;
+};
+
+const Region OutputBeginFrameTest::kEmptyRegion{Rect{0, 0, 0, 0}};
+const Region OutputBeginFrameTest::kNotEmptyRegion{Rect{0, 0, 1, 1}};
+
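+// Taken together, the tests below check that beginFrame() asks the render
+// surface to recompose only when the output is dirty and it either has output
+// layers now or had visible layers last frame, and that
+// lastCompositionHadVisibleLayers is only updated when recomposition happens.
+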
+TEST_F(OutputBeginFrameTest, hasDirtyHasLayersHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kNotEmptyRegion)
+ .andIfGetOutputLayerCountReturns(1u)
+ .andIfLastCompositionHadVisibleLayersIs(true)
+ .thenExpectRenderSurfaceBeginFrameCall(true)
+ .execute()
+ .checkPostconditionHadVisibleLayers(true);
+}
+
+TEST_F(OutputBeginFrameTest, hasDirtyNotHasLayersHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kNotEmptyRegion)
+ .andIfGetOutputLayerCountReturns(0u)
+ .andIfLastCompositionHadVisibleLayersIs(true)
+ .thenExpectRenderSurfaceBeginFrameCall(true)
+ .execute()
+ .checkPostconditionHadVisibleLayers(false);
+}
+
+TEST_F(OutputBeginFrameTest, hasDirtyHasLayersNotHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kNotEmptyRegion)
+ .andIfGetOutputLayerCountReturns(1u)
+ .andIfLastCompositionHadVisibleLayersIs(false)
+ .thenExpectRenderSurfaceBeginFrameCall(true)
+ .execute()
+ .checkPostconditionHadVisibleLayers(true);
+}
+
+TEST_F(OutputBeginFrameTest, hasDirtyNotHasLayersNotHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kNotEmptyRegion)
+ .andIfGetOutputLayerCountReturns(0u)
+ .andIfLastCompositionHadVisibleLayersIs(false)
+ .thenExpectRenderSurfaceBeginFrameCall(false)
+ .execute()
+ .checkPostconditionHadVisibleLayers(false);
+}
+
+TEST_F(OutputBeginFrameTest, notHasDirtyHasLayersHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kEmptyRegion)
+ .andIfGetOutputLayerCountReturns(1u)
+ .andIfLastCompositionHadVisibleLayersIs(true)
+ .thenExpectRenderSurfaceBeginFrameCall(false)
+ .execute()
+ .checkPostconditionHadVisibleLayers(true);
+}
+
+TEST_F(OutputBeginFrameTest, notHasDirtyNotHasLayersHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kEmptyRegion)
+ .andIfGetOutputLayerCountReturns(0u)
+ .andIfLastCompositionHadVisibleLayersIs(true)
+ .thenExpectRenderSurfaceBeginFrameCall(false)
+ .execute()
+ .checkPostconditionHadVisibleLayers(true);
+}
+
+TEST_F(OutputBeginFrameTest, notHasDirtyHasLayersNotHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kEmptyRegion)
+ .andIfGetOutputLayerCountReturns(1u)
+ .andIfLastCompositionHadVisibleLayersIs(false)
+ .thenExpectRenderSurfaceBeginFrameCall(false)
+ .execute()
+ .checkPostconditionHadVisibleLayers(false);
+}
+
+TEST_F(OutputBeginFrameTest, notHasDirtyNotHasLayersNotHadLayersLastFrame) {
+ verify().ifGetDirtyRegionReturns(kEmptyRegion)
+ .andIfGetOutputLayerCountReturns(0u)
+ .andIfLastCompositionHadVisibleLayersIs(false)
+ .thenExpectRenderSurfaceBeginFrameCall(false)
+ .execute()
+ .checkPostconditionHadVisibleLayers(false);
+}
+
+/*
+ * Output::devOptRepaintFlash()
+ */
+
+struct OutputDevOptRepaintFlashTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // Sets up the helper functions called by devOptRepaintFlash to use mock
+ // implementations.
+ MOCK_CONST_METHOD1(getDirtyRegion, Region(bool));
+ MOCK_METHOD1(composeSurfaces, std::optional<base::unique_fd>(const Region&));
+ MOCK_METHOD0(postFramebuffer, void());
+ MOCK_METHOD0(prepareFrame, void());
+ };
+
+ OutputDevOptRepaintFlashTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+ }
+
+ static const Region kEmptyRegion;
+ static const Region kNotEmptyRegion;
+
+ StrictMock<OutputPartialMock> mOutput;
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+const Region OutputDevOptRepaintFlashTest::kEmptyRegion{Rect{0, 0, 0, 0}};
+const Region OutputDevOptRepaintFlashTest::kNotEmptyRegion{Rect{0, 0, 1, 1}};
+
+TEST_F(OutputDevOptRepaintFlashTest, doesNothingIfFlashDelayNotSet) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = {};
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = true;
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, postsAndPreparesANewFrameIfNotEnabled) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = false;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, postsAndPreparesANewFrameIfNotDirty) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = true;
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, getDirtyRegion(true)).WillOnce(Return(kEmptyRegion));
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+TEST_F(OutputDevOptRepaintFlashTest, alsoComposesSurfacesAndQueuesABufferIfDirty) {
+ mRefreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::microseconds(1);
+ mRefreshArgs.repaintEverything = false;
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, getDirtyRegion(false)).WillOnce(Return(kNotEmptyRegion));
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(kNotEmptyRegion)));
+ EXPECT_CALL(*mRenderSurface, queueBuffer(_));
+ EXPECT_CALL(mOutput, postFramebuffer());
+ EXPECT_CALL(mOutput, prepareFrame());
+
+ mOutput.devOptRepaintFlash(mRefreshArgs);
+}
+
+// TODO(b/144060211) - Add coverage
+
+/*
+ * Output::finishFrame()
+ */
+
+struct OutputFinishFrameTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // Sets up the helper functions called by finishFrame to use mock
+ // implementations.
+ MOCK_METHOD1(composeSurfaces, std::optional<base::unique_fd>(const Region&));
+ MOCK_METHOD0(postFramebuffer, void());
+ };
+
+ OutputFinishFrameTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+ }
+
+ StrictMock<OutputPartialMock> mOutput;
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+ CompositionRefreshArgs mRefreshArgs;
+};
+
+TEST_F(OutputFinishFrameTest, ifNotEnabledDoesNothing) {
+ mOutput.mState.isEnabled = false;
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+TEST_F(OutputFinishFrameTest, takesEarlyOutifComposeSurfacesReturnsNoFence) {
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
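+ // No action is specified for composeSurfaces, so the mock's default action
+ // returns an empty std::optional; finishFrame should then return without
+ // queueing a buffer (hence there is no queueBuffer expectation).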
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(Region::INVALID_REGION)));
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+TEST_F(OutputFinishFrameTest, queuesBufferIfComposeSurfacesReturnsAFence) {
+ mOutput.mState.isEnabled = true;
+
+ InSequence seq;
+ EXPECT_CALL(mOutput, composeSurfaces(RegionEq(Region::INVALID_REGION)))
+ .WillOnce(Return(ByMove(base::unique_fd())));
+ EXPECT_CALL(*mRenderSurface, queueBuffer(_));
+
+ mOutput.finishFrame(mRefreshArgs);
+}
+
+/*
+ * Output::postFramebuffer()
+ */
+
+struct OutputPostFramebufferTest : public testing::Test {
+ struct OutputPartialMock : public OutputPartialMockBase {
+ // Sets up the helper functions called by postFramebuffer to use mock
+ // implementations.
+ MOCK_METHOD0(presentAndGetFrameFences, compositionengine::Output::FrameFences());
+ };
+
+ struct Layer {
+ Layer() {
+ EXPECT_CALL(outputLayer, getLayerFE()).WillRepeatedly(ReturnRef(layerFE));
+ EXPECT_CALL(outputLayer, getHwcLayer()).WillRepeatedly(Return(&hwc2Layer));
+ }
+
+ StrictMock<mock::OutputLayer> outputLayer;
+ StrictMock<mock::LayerFE> layerFE;
+ StrictMock<HWC2::mock::Layer> hwc2Layer;
+ };
+
+ OutputPostFramebufferTest() {
+ mOutput.setDisplayColorProfileForTest(
+ std::unique_ptr<DisplayColorProfile>(mDisplayColorProfile));
+ mOutput.setRenderSurfaceForTest(std::unique_ptr<RenderSurface>(mRenderSurface));
+
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(3u));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
+ .WillRepeatedly(Return(&mLayer1.outputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(1u))
+ .WillRepeatedly(Return(&mLayer2.outputLayer));
+ EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(2u))
+ .WillRepeatedly(Return(&mLayer3.outputLayer));
+ }
+
+ StrictMock<OutputPartialMock> mOutput;
+ mock::DisplayColorProfile* mDisplayColorProfile = new StrictMock<mock::DisplayColorProfile>();
+ mock::RenderSurface* mRenderSurface = new StrictMock<mock::RenderSurface>();
+
+ Layer mLayer1;
+ Layer mLayer2;
+ Layer mLayer3;
+};
+
+TEST_F(OutputPostFramebufferTest, ifNotEnabledDoesNothing) {
+ mOutput.mState.isEnabled = false;
+
+ mOutput.postFramebuffer();
+}
+
+TEST_F(OutputPostFramebufferTest, ifEnabledMustFlipThenPresentThenSendPresentCompleted) {
+ mOutput.mState.isEnabled = true;
+
+ compositionengine::Output::FrameFences frameFences;
+
+ // This should happen even if there are no output layers.
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillOnce(Return(0u));
+
+ // For this test in particular we want to make sure the call expectations
+ // set up below are satisfied in this specific order.
+ InSequence seq;
+
+ EXPECT_CALL(*mRenderSurface, flip());
+ EXPECT_CALL(mOutput, presentAndGetFrameFences()).WillOnce(Return(frameFences));
+ EXPECT_CALL(*mRenderSurface, onPresentDisplayCompleted());
+
+ mOutput.postFramebuffer();
+}
+
+TEST_F(OutputPostFramebufferTest, releaseFencesAreSentToLayerFE) {
+ // Simulate getting release fences from each layer, and ensure they are passed to the
+ // front-end layer interface for each layer correctly.
+
+ mOutput.mState.isEnabled = true;
+
+ // Create three unique fence instances
+ sp<Fence> layer1Fence = new Fence();
+ sp<Fence> layer2Fence = new Fence();
+ sp<Fence> layer3Fence = new Fence();
+
+ compositionengine::Output::FrameFences frameFences;
+ frameFences.layerFences.emplace(&mLayer1.hwc2Layer, layer1Fence);
+ frameFences.layerFences.emplace(&mLayer2.hwc2Layer, layer2Fence);
+ frameFences.layerFences.emplace(&mLayer3.hwc2Layer, layer3Fence);
+
+ EXPECT_CALL(*mRenderSurface, flip());
+ EXPECT_CALL(mOutput, presentAndGetFrameFences()).WillOnce(Return(frameFences));
+ EXPECT_CALL(*mRenderSurface, onPresentDisplayCompleted());
+
+ // Compare the pointer values of each fence to make sure the correct ones
+ // are passed. This happens to work with the current implementation, but
+ // would not survive certain calls like Fence::merge() which would return a
+ // new instance.
+ EXPECT_CALL(mLayer1.layerFE,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(layer1Fence.get()))));
+ EXPECT_CALL(mLayer2.layerFE,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(layer2Fence.get()))));
+ EXPECT_CALL(mLayer3.layerFE,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(layer3Fence.get()))));
+
+ mOutput.postFramebuffer();
+}
+
+TEST_F(OutputPostFramebufferTest, releaseFencesIncludeClientTargetAcquireFence) {
+ mOutput.mState.isEnabled = true;
+ mOutput.mState.usesClientComposition = true;
+
+ sp<Fence> clientTargetAcquireFence = new Fence();
+ sp<Fence> layer1Fence = new Fence();
+ sp<Fence> layer2Fence = new Fence();
+ sp<Fence> layer3Fence = new Fence();
+ compositionengine::Output::FrameFences frameFences;
+ frameFences.clientTargetAcquireFence = clientTargetAcquireFence;
+ frameFences.layerFences.emplace(&mLayer1.hwc2Layer, layer1Fence);
+ frameFences.layerFences.emplace(&mLayer2.hwc2Layer, layer2Fence);
+ frameFences.layerFences.emplace(&mLayer3.hwc2Layer, layer3Fence);
+
+ EXPECT_CALL(*mRenderSurface, flip());
+ EXPECT_CALL(mOutput, presentAndGetFrameFences()).WillOnce(Return(frameFences));
+ EXPECT_CALL(*mRenderSurface, onPresentDisplayCompleted());
+
+ // Fence::merge is called, and since none of the fences are actually valid,
+ // Fence::NO_FENCE is returned and passed to each onLayerDisplayed() call.
+ // This is the best we can do without creating a real kernel fence object.
+ EXPECT_CALL(mLayer1.layerFE, onLayerDisplayed(Fence::NO_FENCE));
+ EXPECT_CALL(mLayer2.layerFE, onLayerDisplayed(Fence::NO_FENCE));
+ EXPECT_CALL(mLayer3.layerFE, onLayerDisplayed(Fence::NO_FENCE));
+
+ mOutput.postFramebuffer();
+}
+
+TEST_F(OutputPostFramebufferTest, releasedLayersSentPresentFence) {
+ mOutput.mState.isEnabled = true;
+ mOutput.mState.usesClientComposition = true;
+
+ // This should happen even if there are no (current) output layers.
+ EXPECT_CALL(mOutput, getOutputLayerCount()).WillOnce(Return(0u));
+
+ // Load up the released layers with some mock instances
+ sp<StrictMock<mock::LayerFE>> releasedLayer1{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> releasedLayer2{new StrictMock<mock::LayerFE>()};
+ sp<StrictMock<mock::LayerFE>> releasedLayer3{new StrictMock<mock::LayerFE>()};
+ Output::ReleasedLayers layers;
+ layers.push_back(releasedLayer1);
+ layers.push_back(releasedLayer2);
+ layers.push_back(releasedLayer3);
+ mOutput.setReleasedLayers(std::move(layers));
+
+ // Set up a fake present fence
+ sp<Fence> presentFence = new Fence();
+ compositionengine::Output::FrameFences frameFences;
+ frameFences.presentFence = presentFence;
+
+ EXPECT_CALL(*mRenderSurface, flip());
+ EXPECT_CALL(mOutput, presentAndGetFrameFences()).WillOnce(Return(frameFences));
+ EXPECT_CALL(*mRenderSurface, onPresentDisplayCompleted());
+
+ // Each released layer should be given the presentFence.
+ EXPECT_CALL(*releasedLayer1,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(presentFence.get()))));
+ EXPECT_CALL(*releasedLayer2,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(presentFence.get()))));
+ EXPECT_CALL(*releasedLayer3,
+ onLayerDisplayed(Property(&sp<Fence>::get, Eq(presentFence.get()))));
+
+ mOutput.postFramebuffer();
+
+ // After the call the list of released layers should have been cleared.
+ EXPECT_TRUE(mOutput.getReleasedLayersForTest().empty());
+}
+
+/*
* Output::composeSurfaces()
*/
@@ -598,7 +2044,7 @@
static const Rect kDefaultOutputScissor;
static const mat4 kDefaultColorTransformMat;
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// Sets up the helper functions called by composeSurfaces to use a mock
// implementations.
MOCK_CONST_METHOD0(getSkipColorTransform, bool());
@@ -607,29 +2053,6 @@
MOCK_METHOD2(appendRegionFlashRequests,
void(const Region&, std::vector<renderengine::LayerSettings>&));
MOCK_METHOD1(setExpensiveRenderingExpected, void(bool));
-
- // compositionengine::Output overrides
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
OutputComposeSurfacesTest() {
@@ -718,36 +2141,13 @@
*/
struct GenerateClientCompositionRequestsTest : public testing::Test {
- struct OutputPartialMock : public impl::Output {
+ struct OutputPartialMock : public OutputPartialMockBase {
// compositionengine::Output overrides
-
std::vector<renderengine::LayerSettings> generateClientCompositionRequests(
bool supportsProtectedContent, Region& clearRegion) override {
return impl::Output::generateClientCompositionRequests(supportsProtectedContent,
clearRegion);
}
-
- const OutputCompositionState& getState() const override { return mState; }
- OutputCompositionState& editState() override { return mState; }
-
- // These need implementations though are not expected to be called.
- MOCK_CONST_METHOD0(getOutputLayerCount, size_t());
- MOCK_CONST_METHOD1(getOutputLayerOrderedByZByIndex,
- compositionengine::OutputLayer*(size_t));
- MOCK_METHOD3(ensureOutputLayer,
- compositionengine::OutputLayer*(
- std::optional<size_t>,
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD0(finalizePendingOutputLayers, void());
- MOCK_METHOD0(clearOutputLayers, void());
- MOCK_CONST_METHOD1(dumpState, void(std::string&));
- MOCK_CONST_METHOD0(getCompositionEngine, const CompositionEngine&());
- MOCK_METHOD2(injectOutputLayerForTest,
- compositionengine::OutputLayer*(
- const std::shared_ptr<compositionengine::Layer>&, const sp<LayerFE>&));
- MOCK_METHOD1(injectOutputLayerForTest, void(std::unique_ptr<OutputLayer>));
-
- impl::OutputCompositionState mState;
};
GenerateClientCompositionRequestsTest() {
@@ -805,6 +2205,7 @@
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
EXPECT_CALL(leftLayerFE, prepareClientComposition(_)).WillOnce(Return(leftLayerRESettings));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -813,6 +2214,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
@@ -865,6 +2267,7 @@
EXPECT_CALL(outputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(layer, getFEState()).WillRepeatedly(ReturnRef(layerFEState));
EXPECT_CALL(layerFE, prepareClientComposition(_)).Times(0);
+ EXPECT_CALL(outputLayer, editState()).WillRepeatedly(ReturnRef(outputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(1u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u)).WillRepeatedly(Return(&outputLayer));
@@ -930,6 +2333,7 @@
EXPECT_CALL(leftOutputLayer, requiresClientComposition()).WillRepeatedly(Return(false));
EXPECT_CALL(leftOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(leftLayer, getFEState()).WillRepeatedly(ReturnRef(leftLayerFEState));
+ EXPECT_CALL(leftOutputLayer, editState()).WillRepeatedly(ReturnRef(leftOutputLayerState));
EXPECT_CALL(rightOutputLayer, getState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(rightOutputLayer, getLayer()).WillRepeatedly(ReturnRef(rightLayer));
@@ -938,6 +2342,7 @@
EXPECT_CALL(rightOutputLayer, needsFiltering()).WillRepeatedly(Return(false));
EXPECT_CALL(rightLayer, getFEState()).WillRepeatedly(ReturnRef(rightLayerFEState));
EXPECT_CALL(rightLayerFE, prepareClientComposition(_)).WillOnce(Return(rightLayerRESettings));
+ EXPECT_CALL(rightOutputLayer, editState()).WillRepeatedly(ReturnRef(rightOutputLayerState));
EXPECT_CALL(mOutput, getOutputLayerCount()).WillRepeatedly(Return(2u));
EXPECT_CALL(mOutput, getOutputLayerOrderedByZByIndex(0u))
diff --git a/services/surfaceflinger/DisplayHardware/ComposerHal.cpp b/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
index aef1c75..dc71128 100644
--- a/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
+++ b/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
@@ -95,7 +95,7 @@
// assume NO_RESOURCES when Status::isOk returns false
constexpr Error kDefaultError = Error::NO_RESOURCES;
-constexpr V2_4::Error kDefaultError_2_4 = V2_4::Error::NO_RESOURCES;
+constexpr V2_4::Error kDefaultError_2_4 = static_cast<V2_4::Error>(kDefaultError);
template<typename T, typename U>
T unwrapRet(Return<T>& ret, const U& default_val)
@@ -247,7 +247,12 @@
void Composer::registerCallback(const sp<IComposerCallback>& callback)
{
android::hardware::setMinSchedulerPolicy(callback, SCHED_FIFO, 2);
- auto ret = mClient->registerCallback(callback);
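+ // Use the 2.4 registration entry point when a 2.4 client is available;
+ // otherwise fall back to the 2.1 registerCallback.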
+ auto ret = [&]() {
+ if (mClient_2_4) {
+ return mClient_2_4->registerCallback_2_4(callback);
+ }
+ return mClient->registerCallback(callback);
+ }();
if (!ret.isOk()) {
ALOGE("failed to register IComposerCallback");
}
@@ -413,15 +418,28 @@
IComposerClient::Attribute attribute, int32_t* outValue)
{
Error error = kDefaultError;
- mClient->getDisplayAttribute(display, config, attribute,
- [&](const auto& tmpError, const auto& tmpValue) {
- error = tmpError;
- if (error != Error::NONE) {
- return;
- }
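+ // HAL 2.4 extends IComposerClient::Attribute (it adds the config group), so
+ // query through the 2.4 client when it is available and fall back to the 2.1
+ // attribute enum otherwise.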
+ if (mClient_2_4) {
+ mClient_2_4->getDisplayAttribute_2_4(display, config, attribute,
+ [&](const auto& tmpError, const auto& tmpValue) {
+ error = static_cast<Error>(tmpError);
+ if (error != Error::NONE) {
+ return;
+ }
- *outValue = tmpValue;
- });
+ *outValue = tmpValue;
+ });
+ } else {
+ mClient->getDisplayAttribute(display, config,
+ static_cast<V2_1::IComposerClient::Attribute>(attribute),
+ [&](const auto& tmpError, const auto& tmpValue) {
+ error = tmpError;
+ if (error != Error::NONE) {
+ return;
+ }
+
+ *outValue = tmpValue;
+ });
+ }
return error;
}
@@ -1200,13 +1218,14 @@
return static_cast<Error>(error);
}
-Error Composer::getDisplayConnectionType(Display display,
- IComposerClient::DisplayConnectionType* outType) {
+V2_4::Error Composer::getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType) {
+ using Error = V2_4::Error;
if (!mClient_2_4) {
return Error::UNSUPPORTED;
}
- V2_4::Error error = kDefaultError_2_4;
+ Error error = kDefaultError_2_4;
mClient_2_4->getDisplayConnectionType(display, [&](const auto& tmpError, const auto& tmpType) {
error = tmpError;
if (error != V2_4::Error::NONE) {
@@ -1216,7 +1235,50 @@
*outType = tmpType;
});
- return static_cast<V2_1::Error>(error);
+ return error;
+}
+
+V2_4::Error Composer::getDisplayVsyncPeriod(Display display, VsyncPeriodNanos* outVsyncPeriod) {
+ using Error = V2_4::Error;
+ if (!mClient_2_4) {
+ return Error::UNSUPPORTED;
+ }
+
+ Error error = kDefaultError_2_4;
+ mClient_2_4->getDisplayVsyncPeriod(display,
+ [&](const auto& tmpError, const auto& tmpVsyncPeriod) {
+ error = tmpError;
+ if (error != Error::NONE) {
+ return;
+ }
+
+ *outVsyncPeriod = tmpVsyncPeriod;
+ });
+
+ return error;
+}
+
+V2_4::Error Composer::setActiveConfigWithConstraints(
+ Display display, Config config,
+ const IComposerClient::VsyncPeriodChangeConstraints& vsyncPeriodChangeConstraints,
+ VsyncPeriodChangeTimeline* outTimeline) {
+ using Error = V2_4::Error;
+ if (!mClient_2_4) {
+ return Error::UNSUPPORTED;
+ }
+
+ Error error = kDefaultError_2_4;
+ mClient_2_4->setActiveConfigWithConstraints(display, config, vsyncPeriodChangeConstraints,
+ [&](const auto& tmpError, const auto& tmpTimeline) {
+ error = tmpError;
+ if (error != Error::NONE) {
+ return;
+ }
+
+ *outTimeline = tmpTimeline;
+ });
+
+ return error;
}
CommandReader::~CommandReader()
diff --git a/services/surfaceflinger/DisplayHardware/ComposerHal.h b/services/surfaceflinger/DisplayHardware/ComposerHal.h
index e743e59..336fdd8 100644
--- a/services/surfaceflinger/DisplayHardware/ComposerHal.h
+++ b/services/surfaceflinger/DisplayHardware/ComposerHal.h
@@ -62,12 +62,14 @@
using V2_1::Config;
using V2_1::Display;
using V2_1::Error;
-using V2_1::IComposerCallback;
using V2_1::Layer;
using V2_3::CommandReaderBase;
using V2_3::CommandWriterBase;
using V2_4::IComposer;
+using V2_4::IComposerCallback;
using V2_4::IComposerClient;
+using V2_4::VsyncPeriodChangeTimeline;
+using V2_4::VsyncPeriodNanos;
using DisplayCapability = IComposerClient::DisplayCapability;
using PerFrameMetadata = IComposerClient::PerFrameMetadata;
using PerFrameMetadataKey = IComposerClient::PerFrameMetadataKey;
@@ -208,10 +210,17 @@
virtual Error setDisplayBrightness(Display display, float brightness) = 0;
// Composer HAL 2.4
+ virtual bool isVsyncPeriodSwitchSupported() = 0;
virtual Error getDisplayCapabilities(Display display,
std::vector<DisplayCapability>* outCapabilities) = 0;
- virtual Error getDisplayConnectionType(Display display,
- IComposerClient::DisplayConnectionType* outType) = 0;
+ virtual V2_4::Error getDisplayConnectionType(
+ Display display, IComposerClient::DisplayConnectionType* outType) = 0;
+ virtual V2_4::Error getDisplayVsyncPeriod(Display display,
+ VsyncPeriodNanos* outVsyncPeriod) = 0;
+ virtual V2_4::Error setActiveConfigWithConstraints(
+ Display display, Config config,
+ const IComposerClient::VsyncPeriodChangeConstraints& vsyncPeriodChangeConstraints,
+ VsyncPeriodChangeTimeline* outTimeline) = 0;
};
namespace impl {
@@ -423,10 +432,16 @@
Error setDisplayBrightness(Display display, float brightness) override;
// Composer HAL 2.4
+ bool isVsyncPeriodSwitchSupported() override { return mClient_2_4 != nullptr; }
Error getDisplayCapabilities(Display display,
std::vector<DisplayCapability>* outCapabilities) override;
- Error getDisplayConnectionType(Display display,
- IComposerClient::DisplayConnectionType* outType) override;
+ V2_4::Error getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType) override;
+ V2_4::Error getDisplayVsyncPeriod(Display display, VsyncPeriodNanos* outVsyncPeriod) override;
+ V2_4::Error setActiveConfigWithConstraints(
+ Display display, Config config,
+ const IComposerClient::VsyncPeriodChangeConstraints& vsyncPeriodChangeConstraints,
+ VsyncPeriodChangeTimeline* outTimeline) override;
private:
#if defined(USE_VR_COMPOSER) && USE_VR_COMPOSER
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.cpp b/services/surfaceflinger/DisplayHardware/HWC2.cpp
index 6f7428a..34254e0 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2.cpp
@@ -81,7 +81,26 @@
Return<void> onVsync(Hwc2::Display display, int64_t timestamp) override
{
- mCallback->onVsyncReceived(mSequenceId, display, timestamp);
+ mCallback->onVsyncReceived(mSequenceId, display, timestamp, std::nullopt);
+ return Void();
+ }
+
+ Return<void> onVsync_2_4(Hwc2::Display display, int64_t timestamp,
+ Hwc2::VsyncPeriodNanos vsyncPeriodNanos) override {
+ // TODO(b/140201379): use vsyncPeriodNanos in the new DispSync
+ mCallback->onVsyncReceived(mSequenceId, display, timestamp,
+ std::make_optional(vsyncPeriodNanos));
+ return Void();
+ }
+
+ Return<void> onVsyncPeriodTimingChanged(
+ Hwc2::Display display,
+ const Hwc2::VsyncPeriodChangeTimeline& updatedTimeline) override {
+ hwc_vsync_period_change_timeline_t timeline;
+ timeline.newVsyncAppliedTimeNanos = updatedTimeline.newVsyncAppliedTimeNanos;
+ timeline.refreshRequired = updatedTimeline.refreshRequired;
+ timeline.refreshTimeNanos = updatedTimeline.refreshTimeNanos;
+ mCallback->onVsyncPeriodTimingChangedReceived(mSequenceId, display, timeline);
return Void();
}
@@ -330,6 +349,36 @@
return Error::None;
}
+bool Display::isVsyncPeriodSwitchSupported() const {
+ ALOGV("[%" PRIu64 "] isVsyncPeriodSwitchSupported()", mId);
+
+ return mComposer.isVsyncPeriodSwitchSupported();
+}
+
+Error Display::getDisplayVsyncPeriod(nsecs_t* outVsyncPeriod) const {
+ ALOGV("[%" PRIu64 "] getDisplayVsyncPeriod", mId);
+
+ Error error;
+
+ if (isVsyncPeriodSwitchSupported()) {
+ Hwc2::VsyncPeriodNanos vsyncPeriodNanos = 0;
+ auto intError = mComposer.getDisplayVsyncPeriod(mId, &vsyncPeriodNanos);
+ error = static_cast<Error>(intError);
+ *outVsyncPeriod = static_cast<nsecs_t>(vsyncPeriodNanos);
+ } else {
+ // Get the default vsync period
+ hwc2_config_t configId = 0;
+ auto intError_2_1 = mComposer.getActiveConfig(mId, &configId);
+ error = static_cast<Error>(intError_2_1);
+ if (error == Error::None) {
+ auto config = mConfigs.at(configId);
+ *outVsyncPeriod = config->getVsyncPeriod();
+ }
+ }
+
+ return error;
+}
+
Error Display::getActiveConfigIndex(int* outIndex) const {
ALOGV("[%" PRIu64 "] getActiveConfigIndex", mId);
hwc2_config_t configId = 0;
@@ -345,6 +394,7 @@
auto pos = mConfigs.find(configId);
if (pos != mConfigs.end()) {
*outIndex = std::distance(mConfigs.begin(), pos);
+ ALOGV("[%" PRIu64 "] index = %d", mId, *outIndex);
} else {
ALOGE("[%" PRIu64 "] getActiveConfig returned unknown config %u", mId, configId);
// Return no error, but the caller needs to check for a negative index
@@ -582,6 +632,46 @@
return Error::None;
}
+Error Display::setActiveConfigWithConstraints(
+ const std::shared_ptr<const HWC2::Display::Config>& config,
+ const VsyncPeriodChangeConstraints& constraints, VsyncPeriodChangeTimeline* outTimeline) {
+ ALOGV("[%" PRIu64 "] setActiveConfigWithConstraints", mId);
+ if (config->getDisplayId() != mId) {
+ ALOGE("setActiveConfigWithConstraints received config %u for the wrong display %" PRIu64
+ " (expected %" PRIu64 ")",
+ config->getId(), config->getDisplayId(), mId);
+ return Error::BadConfig;
+ }
+
+ if (isVsyncPeriodSwitchSupported()) {
+ Hwc2::IComposerClient::VsyncPeriodChangeConstraints hwc2Constraints;
+ hwc2Constraints.desiredTimeNanos = constraints.desiredTimeNanos;
+ hwc2Constraints.seamlessRequired = constraints.seamlessRequired;
+
+ Hwc2::VsyncPeriodChangeTimeline vsyncPeriodChangeTimeline = {};
+ auto intError =
+ mComposer.setActiveConfigWithConstraints(mId, config->getId(), hwc2Constraints,
+ &vsyncPeriodChangeTimeline);
+ outTimeline->newVsyncAppliedTimeNanos = vsyncPeriodChangeTimeline.newVsyncAppliedTimeNanos;
+ outTimeline->refreshRequired = vsyncPeriodChangeTimeline.refreshRequired;
+ outTimeline->refreshTimeNanos = vsyncPeriodChangeTimeline.refreshTimeNanos;
+ return static_cast<Error>(intError);
+ }
+
+ // Use legacy setActiveConfig instead
+ ALOGV("fallback to legacy setActiveConfig");
+ const auto now = systemTime();
+ if (constraints.desiredTimeNanos > now || constraints.seamlessRequired) {
+ ALOGE("setActiveConfigWithConstraints received constraints that can't be satisfied");
+ }
+
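+ // The 2.1 HAL does not report a change timeline, so fill one in from the
+ // current time and the requested constraints.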
+ auto intError_2_4 = mComposer.setActiveConfig(mId, config->getId());
+ outTimeline->newVsyncAppliedTimeNanos = std::max(now, constraints.desiredTimeNanos);
+ outTimeline->refreshRequired = true;
+ outTimeline->refreshTimeNanos = now;
+ return static_cast<Error>(intError_2_4);
+}
+
Error Display::setActiveConfig(const std::shared_ptr<const Config>& config)
{
if (config->getDisplayId() != mId) {
@@ -742,12 +832,13 @@
ALOGV("[%" PRIu64 "] loadConfig(%u)", mId, configId);
auto config = Config::Builder(*this, configId)
- .setWidth(getAttribute(configId, Attribute::Width))
- .setHeight(getAttribute(configId, Attribute::Height))
- .setVsyncPeriod(getAttribute(configId, Attribute::VsyncPeriod))
- .setDpiX(getAttribute(configId, Attribute::DpiX))
- .setDpiY(getAttribute(configId, Attribute::DpiY))
- .build();
+ .setWidth(getAttribute(configId, Attribute::Width))
+ .setHeight(getAttribute(configId, Attribute::Height))
+ .setVsyncPeriod(getAttribute(configId, Attribute::VsyncPeriod))
+ .setDpiX(getAttribute(configId, Attribute::DpiX))
+ .setDpiY(getAttribute(configId, Attribute::DpiY))
+ .setConfigGroup(getAttribute(configId, Attribute::ConfigGroup))
+ .build();
mConfigs.emplace(configId, std::move(config));
}
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.h b/services/surfaceflinger/DisplayHardware/HWC2.h
index b7cdf7f..81ae3b6 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2.h
@@ -38,6 +38,8 @@
#include <unordered_set>
#include <vector>
+#include "../Scheduler/StrongTyping.h"
+
namespace android {
struct DisplayedFrameStats;
class Fence;
@@ -54,6 +56,8 @@
class Display;
class Layer;
+using VsyncPeriodChangeConstraints = hwc_vsync_period_change_constraints_t;
+using VsyncPeriodChangeTimeline = hwc_vsync_period_change_timeline_t;
// Implement this interface to receive hardware composer events.
//
@@ -70,8 +74,12 @@
Connection connection) = 0;
virtual void onRefreshReceived(int32_t sequenceId,
hwc2_display_t display) = 0;
- virtual void onVsyncReceived(int32_t sequenceId, hwc2_display_t display,
- int64_t timestamp) = 0;
+ virtual void onVsyncReceived(int32_t sequenceId, hwc2_display_t display, int64_t timestamp,
+ std::optional<hwc2_vsync_period_t> vsyncPeriod) = 0;
+ virtual void onVsyncPeriodTimingChangedReceived(
+ int32_t sequenceId, hwc2_display_t display,
+ const hwc_vsync_period_change_timeline_t& updatedTimeline) = 0;
+
virtual ~ComposerCallback() = default;
};
@@ -170,6 +178,10 @@
}
return *this;
}
+ Builder& setConfigGroup(int32_t configGroup) {
+ mConfig->mConfigGroup = configGroup;
+ return *this;
+ }
private:
float getDefaultDensity();
@@ -184,6 +196,7 @@
nsecs_t getVsyncPeriod() const { return mVsyncPeriod; }
float getDpiX() const { return mDpiX; }
float getDpiY() const { return mDpiY; }
+ int32_t getConfigGroup() const { return mConfigGroup; }
private:
Config(Display& display, hwc2_config_t id);
@@ -196,12 +209,14 @@
nsecs_t mVsyncPeriod;
float mDpiX;
float mDpiY;
+ int32_t mConfigGroup;
};
virtual hwc2_display_t getId() const = 0;
virtual bool isConnected() const = 0;
virtual void setConnected(bool connected) = 0; // For use by Device only
virtual const std::unordered_set<DisplayCapability>& getCapabilities() const = 0;
+ virtual bool isVsyncPeriodSwitchSupported() const = 0;
[[clang::warn_unused_result]] virtual Error acceptChanges() = 0;
[[clang::warn_unused_result]] virtual Error createLayer(Layer** outLayer) = 0;
@@ -264,6 +279,12 @@
uint32_t* outNumTypes, uint32_t* outNumRequests,
android::sp<android::Fence>* outPresentFence, uint32_t* state) = 0;
[[clang::warn_unused_result]] virtual Error setDisplayBrightness(float brightness) const = 0;
+ [[clang::warn_unused_result]] virtual Error getDisplayVsyncPeriod(
+ nsecs_t* outVsyncPeriod) const = 0;
+ [[clang::warn_unused_result]] virtual Error setActiveConfigWithConstraints(
+ const std::shared_ptr<const HWC2::Display::Config>& config,
+ const VsyncPeriodChangeConstraints& constraints,
+ VsyncPeriodChangeTimeline* outTimeline) = 0;
};
namespace impl {
@@ -323,6 +344,10 @@
Error presentOrValidate(uint32_t* outNumTypes, uint32_t* outNumRequests,
android::sp<android::Fence>* outPresentFence, uint32_t* state) override;
Error setDisplayBrightness(float brightness) const override;
+ Error getDisplayVsyncPeriod(nsecs_t* outVsyncPeriod) const override;
+ Error setActiveConfigWithConstraints(const std::shared_ptr<const HWC2::Display::Config>& config,
+ const VsyncPeriodChangeConstraints& constraints,
+ VsyncPeriodChangeTimeline* outTimeline) override;
// Other Display methods
hwc2_display_t getId() const override { return mId; }
@@ -331,6 +356,7 @@
const std::unordered_set<DisplayCapability>& getCapabilities() const override {
return mDisplayCapabilities;
};
+ virtual bool isVsyncPeriodSwitchSupported() const override;
private:
int32_t getAttribute(hwc2_config_t configId, Attribute attribute);
@@ -355,7 +381,9 @@
bool mIsConnected;
DisplayType mType;
std::unordered_map<hwc2_layer_t, std::unique_ptr<Layer>> mLayers;
+
std::unordered_map<hwc2_config_t, std::shared_ptr<const Config>> mConfigs;
+
std::once_flag mDisplayCapabilityQueryFlag;
std::unordered_set<DisplayCapability> mDisplayCapabilities;
};
diff --git a/services/surfaceflinger/FrameTracer/FrameTracer.cpp b/services/surfaceflinger/FrameTracer/FrameTracer.cpp
index 3a0408e..6f91843 100644
--- a/services/surfaceflinger/FrameTracer/FrameTracer.cpp
+++ b/services/surfaceflinger/FrameTracer/FrameTracer.cpp
@@ -44,52 +44,52 @@
FrameTracerDataSource::Register(dsd);
}
-void FrameTracer::traceNewLayer(int32_t layerID, const std::string& layerName) {
- FrameTracerDataSource::Trace([this, layerID, &layerName](FrameTracerDataSource::TraceContext) {
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+void FrameTracer::traceNewLayer(int32_t layerId, const std::string& layerName) {
+ FrameTracerDataSource::Trace([this, layerId, &layerName](FrameTracerDataSource::TraceContext) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- mTraceTracker[layerID].layerName = layerName;
+ mTraceTracker[layerId].layerName = layerName;
}
});
}
-void FrameTracer::traceTimestamp(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+void FrameTracer::traceTimestamp(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
nsecs_t timestamp, FrameEvent::BufferEventType type,
nsecs_t duration) {
- FrameTracerDataSource::Trace([this, layerID, bufferID, frameNumber, timestamp, type,
+ FrameTracerDataSource::Trace([this, layerId, bufferID, frameNumber, timestamp, type,
duration](FrameTracerDataSource::TraceContext ctx) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
return;
}
// Handle any pending fences for this buffer.
- tracePendingFencesLocked(ctx, layerID, bufferID);
+ tracePendingFencesLocked(ctx, layerId, bufferID);
// Complete current trace.
- traceLocked(ctx, layerID, bufferID, frameNumber, timestamp, type, duration);
+ traceLocked(ctx, layerId, bufferID, frameNumber, timestamp, type, duration);
});
}
-void FrameTracer::traceFence(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+void FrameTracer::traceFence(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& fence,
FrameEvent::BufferEventType type, nsecs_t startTime) {
- FrameTracerDataSource::Trace([this, layerID, bufferID, frameNumber, &fence, type,
+ FrameTracerDataSource::Trace([this, layerId, bufferID, frameNumber, &fence, type,
startTime](FrameTracerDataSource::TraceContext ctx) {
const nsecs_t signalTime = fence->getSignalTime();
if (signalTime != Fence::SIGNAL_TIME_INVALID) {
std::lock_guard<std::mutex> lock(mTraceMutex);
- if (mTraceTracker.find(layerID) == mTraceTracker.end()) {
+ if (mTraceTracker.find(layerId) == mTraceTracker.end()) {
return;
}
// Handle any pending fences for this buffer.
- tracePendingFencesLocked(ctx, layerID, bufferID);
+ tracePendingFencesLocked(ctx, layerId, bufferID);
if (signalTime != Fence::SIGNAL_TIME_PENDING) {
- traceSpanLocked(ctx, layerID, bufferID, frameNumber, type, startTime, signalTime);
+ traceSpanLocked(ctx, layerId, bufferID, frameNumber, type, startTime, signalTime);
} else {
- mTraceTracker[layerID].pendingFences[bufferID].push_back(
+ mTraceTracker[layerId].pendingFences[bufferID].push_back(
{.frameNumber = frameNumber,
.type = type,
.fence = fence,
@@ -100,9 +100,9 @@
}
void FrameTracer::tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx,
- int32_t layerID, uint64_t bufferID) {
- if (mTraceTracker[layerID].pendingFences.count(bufferID)) {
- auto& pendingFences = mTraceTracker[layerID].pendingFences[bufferID];
+ int32_t layerId, uint64_t bufferID) {
+ if (mTraceTracker[layerId].pendingFences.count(bufferID)) {
+ auto& pendingFences = mTraceTracker[layerId].pendingFences[bufferID];
for (size_t i = 0; i < pendingFences.size(); ++i) {
auto& pendingFence = pendingFences[i];
@@ -116,7 +116,7 @@
if (signalTime != Fence::SIGNAL_TIME_INVALID &&
systemTime() - signalTime < kFenceSignallingDeadline) {
- traceSpanLocked(ctx, layerID, bufferID, pendingFence.frameNumber, pendingFence.type,
+ traceSpanLocked(ctx, layerId, bufferID, pendingFence.frameNumber, pendingFence.type,
pendingFence.startTime, signalTime);
}
@@ -126,7 +126,7 @@
}
}
-void FrameTracer::traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+void FrameTracer::traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
FrameEvent::BufferEventType type, nsecs_t duration) {
auto packet = ctx.NewTracePacket();
@@ -138,9 +138,9 @@
}
event->set_type(type);
- if (mTraceTracker.find(layerID) != mTraceTracker.end() &&
- !mTraceTracker[layerID].layerName.empty()) {
- const std::string& layerName = mTraceTracker[layerID].layerName;
+ if (mTraceTracker.find(layerId) != mTraceTracker.end() &&
+ !mTraceTracker[layerId].layerName.empty()) {
+ const std::string& layerName = mTraceTracker[layerId].layerName;
event->set_layer_name(layerName.c_str(), layerName.size());
}
@@ -149,7 +149,7 @@
}
}
-void FrameTracer::traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+void FrameTracer::traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber,
FrameEvent::BufferEventType type, nsecs_t startTime,
nsecs_t endTime) {
@@ -159,12 +159,12 @@
timestamp = startTime;
duration = endTime - startTime;
}
- traceLocked(ctx, layerID, bufferID, frameNumber, timestamp, type, duration);
+ traceLocked(ctx, layerId, bufferID, frameNumber, timestamp, type, duration);
}
-void FrameTracer::onDestroy(int32_t layerID) {
+void FrameTracer::onDestroy(int32_t layerId) {
std::lock_guard<std::mutex> traceLock(mTraceMutex);
- mTraceTracker.erase(layerID);
+ mTraceTracker.erase(layerId);
}
std::string FrameTracer::miniDump() {
diff --git a/services/surfaceflinger/FrameTracer/FrameTracer.h b/services/surfaceflinger/FrameTracer/FrameTracer.h
index e91a750..ef5df90 100644
--- a/services/surfaceflinger/FrameTracer/FrameTracer.h
+++ b/services/surfaceflinger/FrameTracer/FrameTracer.h
@@ -47,21 +47,21 @@
void registerDataSource();
// Starts tracking a new layer for tracing. Needs to be called once before traceTimestamp() or
// traceFence() for each layer.
- void traceNewLayer(int32_t layerID, const std::string& layerName);
+ void traceNewLayer(int32_t layerId, const std::string& layerName);
// Creates a trace point at the timestamp provided.
- void traceTimestamp(int32_t layerID, uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
+ void traceTimestamp(int32_t layerId, uint64_t bufferID, uint64_t frameNumber, nsecs_t timestamp,
FrameEvent::BufferEventType type, nsecs_t duration = 0);
// Creates a trace point after the provided fence has been signalled. If a startTime is provided
// the trace will be timestamped from startTime until fence signalling time. If no
// startTime is provided, a durationless trace point will be created timestamped at fence
// signalling time. If the fence hasn't signalled yet, the trace point will be created the next
// time after signalling a trace call for this buffer occurs.
- void traceFence(int32_t layerID, uint64_t bufferID, uint64_t frameNumber,
+ void traceFence(int32_t layerId, uint64_t bufferID, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& fence, FrameEvent::BufferEventType type,
nsecs_t startTime = 0);
// Takes care of cleanup when a layer is destroyed.
- void onDestroy(int32_t layerID);
+ void onDestroy(int32_t layerId);
std::string miniDump();
@@ -88,15 +88,15 @@
// Checks if any pending fences for a layer and buffer have signalled and, if they have, creates
// trace points for them.
- void tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+ void tracePendingFencesLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID);
// Creates a trace point by translating a start time and an end time to a timestamp and
// duration. If startTime is later than end time it sets end time as the timestamp and the
// duration to 0. Used by traceFence().
- void traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID,
+ void traceSpanLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId,
uint64_t bufferID, uint64_t frameNumber, FrameEvent::BufferEventType type,
nsecs_t startTime, nsecs_t endTime);
- void traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerID, uint64_t bufferID,
+ void traceLocked(FrameTracerDataSource::TraceContext& ctx, int32_t layerId, uint64_t bufferID,
uint64_t frameNumber, nsecs_t timestamp, FrameEvent::BufferEventType type,
nsecs_t duration = 0);
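
The traceFence() path above defers trace points for fences that have not signalled yet: the event is parked in mTraceTracker[layerId].pendingFences[bufferID] and flushed by tracePendingFencesLocked() the next time a trace call arrives for that buffer, and only if the signal time is within kFenceSignallingDeadline. Below is a minimal, self-contained sketch of that deferred-flush pattern; all names (DeferredFenceTracer, kSignallingDeadline, the 100 ms value) are illustrative assumptions, not the FrameTracer API.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_map>
#include <vector>

using nsecs_t = int64_t;
constexpr nsecs_t kSignalPending = -1;                // stand-in for Fence::SIGNAL_TIME_PENDING
constexpr nsecs_t kSignallingDeadline = 100'000'000;  // assumed 100 ms cutoff

// One parked event; the signal time is re-queried at flush time, like FenceTime.
struct PendingFence {
    uint64_t frameNumber;
    std::function<nsecs_t()> querySignalTime;
};

class DeferredFenceTracer {
public:
    // Trace immediately if the fence already signalled, otherwise park it.
    void onFence(uint64_t bufferId, uint64_t frameNumber,
                 std::function<nsecs_t()> querySignalTime, nsecs_t now) {
        flushPending(bufferId, now);
        const nsecs_t signalTime = querySignalTime();
        if (signalTime == kSignalPending) {
            mPending[bufferId].push_back({frameNumber, std::move(querySignalTime)});
        } else {
            emit(bufferId, frameNumber, signalTime);
        }
    }

    // Emit parked events whose fences signalled recently enough; keep the rest.
    void flushPending(uint64_t bufferId, nsecs_t now) {
        auto it = mPending.find(bufferId);
        if (it == mPending.end()) return;
        auto& fences = it->second;
        for (auto f = fences.begin(); f != fences.end();) {
            const nsecs_t signalTime = f->querySignalTime();
            if (signalTime == kSignalPending) {
                ++f;  // still waiting; try again on the next trace call for this buffer
                continue;
            }
            if (now - signalTime < kSignallingDeadline) {
                emit(bufferId, f->frameNumber, signalTime);
            }
            f = fences.erase(f);
        }
    }

private:
    void emit(uint64_t bufferId, uint64_t frameNumber, nsecs_t ts) {
        std::printf("buffer %llu frame %llu signalled at %lld\n",
                    static_cast<unsigned long long>(bufferId),
                    static_cast<unsigned long long>(frameNumber),
                    static_cast<long long>(ts));
    }
    std::unordered_map<uint64_t, std::vector<PendingFence>> mPending;
};
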
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 1d0ce10..ce9aab5 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1360,9 +1360,9 @@
void Layer::onDisconnect() {
Mutex::Autolock lock(mFrameEventHistoryMutex);
mFrameEventHistory.onDisconnect();
- const int32_t layerID = getSequence();
- mFlinger->mTimeStats->onDestroy(layerID);
- mFlinger->mFrameTracer->onDestroy(layerID);
+ const int32_t layerId = getSequence();
+ mFlinger->mTimeStats->onDestroy(layerId);
+ mFlinger->mFrameTracer->onDestroy(layerId);
}
void Layer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
@@ -1370,6 +1370,8 @@
if (newTimestamps) {
mFlinger->mTimeStats->setPostTime(getSequence(), newTimestamps->frameNumber,
getName().c_str(), newTimestamps->postedTime);
+ mFlinger->mTimeStats->setAcquireFence(getSequence(), newTimestamps->frameNumber,
+ newTimestamps->acquireFence);
}
Mutex::Autolock lock(mFrameEventHistoryMutex);
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 1388612..7abcd0f 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -531,7 +531,7 @@
* called after composition.
* returns true if the layer latched a new buffer this frame.
*/
- virtual bool onPostComposition(const std::optional<DisplayId>& /*displayId*/,
+ virtual bool onPostComposition(sp<const DisplayDevice> /*displayDevice*/,
const std::shared_ptr<FenceTime>& /*glDoneFence*/,
const std::shared_ptr<FenceTime>& /*presentFence*/,
const CompositorTiming& /*compositorTiming*/) {
diff --git a/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp b/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp
new file mode 100644
index 0000000..7dc98cc
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RefreshRateConfigs.h"
+
+namespace android::scheduler {
+using RefreshRate = RefreshRateConfigs::RefreshRate;
+using RefreshRateType = RefreshRateConfigs::RefreshRateType;
+
+// Returns the refresh rate map. This map won't be modified at runtime, so it's safe to access
+// from multiple threads. This can only be called if refreshRateSwitching() returns true.
+// TODO(b/122916473): Get this information from configs prepared by vendors, instead of
+// baking them in.
+const std::map<RefreshRateType, RefreshRate>& RefreshRateConfigs::getRefreshRateMap() const {
+ LOG_ALWAYS_FATAL_IF(!mRefreshRateSwitchingSupported);
+ return mRefreshRateMap;
+}
+
+const RefreshRate& RefreshRateConfigs::getRefreshRateFromType(RefreshRateType type) const {
+ if (!mRefreshRateSwitchingSupported) {
+ return getCurrentRefreshRate().second;
+ } else {
+ auto refreshRate = mRefreshRateMap.find(type);
+ LOG_ALWAYS_FATAL_IF(refreshRate == mRefreshRateMap.end());
+ return refreshRate->second;
+ }
+}
+
+std::pair<RefreshRateType, const RefreshRate&> RefreshRateConfigs::getCurrentRefreshRate() const {
+ int currentConfig = mCurrentConfig;
+ if (mRefreshRateSwitchingSupported) {
+ for (const auto& [type, refresh] : mRefreshRateMap) {
+ if (refresh.configId == currentConfig) {
+ return {type, refresh};
+ }
+ }
+ LOG_ALWAYS_FATAL();
+ }
+ return {RefreshRateType::DEFAULT, mRefreshRates[currentConfig]};
+}
+
+const RefreshRate& RefreshRateConfigs::getRefreshRateFromConfigId(int configId) const {
+ LOG_ALWAYS_FATAL_IF(configId >= mRefreshRates.size());
+ return mRefreshRates[configId];
+}
+
+RefreshRateType RefreshRateConfigs::getRefreshRateTypeFromHwcConfigId(hwc2_config_t hwcId) const {
+ if (!mRefreshRateSwitchingSupported) return RefreshRateType::DEFAULT;
+
+ for (const auto& [type, refreshRate] : mRefreshRateMap) {
+ if (refreshRate.hwcId == hwcId) {
+ return type;
+ }
+ }
+
+ return RefreshRateType::DEFAULT;
+}
+
+void RefreshRateConfigs::setCurrentConfig(int config) {
+ LOG_ALWAYS_FATAL_IF(config >= mRefreshRates.size());
+ mCurrentConfig = config;
+}
+
+RefreshRateConfigs::RefreshRateConfigs(bool refreshRateSwitching,
+ const std::vector<InputConfig>& configs, int currentConfig) {
+ init(refreshRateSwitching, configs, currentConfig);
+}
+
+RefreshRateConfigs::RefreshRateConfigs(
+ bool refreshRateSwitching,
+ const std::vector<std::shared_ptr<const HWC2::Display::Config>>& configs,
+ int currentConfig) {
+ std::vector<InputConfig> inputConfigs;
+ for (const auto& config : configs) {
+ inputConfigs.push_back({config->getId(), config->getVsyncPeriod()});
+ }
+ init(refreshRateSwitching, inputConfigs, currentConfig);
+}
+
+void RefreshRateConfigs::init(bool refreshRateSwitching, const std::vector<InputConfig>& configs,
+ int currentConfig) {
+ mRefreshRateSwitchingSupported = refreshRateSwitching;
+ LOG_ALWAYS_FATAL_IF(configs.empty());
+ LOG_ALWAYS_FATAL_IF(currentConfig >= configs.size());
+ mCurrentConfig = currentConfig;
+
+ auto buildRefreshRate = [&](int configId) -> RefreshRate {
+ const nsecs_t vsyncPeriod = configs[configId].vsyncPeriod;
+ const float fps = 1e9 / vsyncPeriod;
+ return {configId, base::StringPrintf("%2.ffps", fps), static_cast<uint32_t>(fps),
+ vsyncPeriod, configs[configId].hwcId};
+ };
+
+ for (int i = 0; i < configs.size(); ++i) {
+ mRefreshRates.push_back(buildRefreshRate(i));
+ }
+
+ if (!mRefreshRateSwitchingSupported) return;
+
+ auto findDefaultAndPerfConfigs = [&]() -> std::optional<std::pair<int, int>> {
+ if (configs.size() < 2) {
+ return {};
+ }
+
+ std::vector<const RefreshRate*> sortedRefreshRates;
+ for (const auto& refreshRate : mRefreshRates) {
+ sortedRefreshRates.push_back(&refreshRate);
+ }
+ std::sort(sortedRefreshRates.begin(), sortedRefreshRates.end(),
+ [](const RefreshRate* refreshRate1, const RefreshRate* refreshRate2) {
+ return refreshRate1->vsyncPeriod > refreshRate2->vsyncPeriod;
+ });
+
+ // When the configs are ordered by the resync rate, we assume that
+ // the first one is DEFAULT and the second one is PERFORMANCE,
+ // i.e. the higher rate.
+ if (sortedRefreshRates[0]->vsyncPeriod == 0 || sortedRefreshRates[1]->vsyncPeriod == 0) {
+ return {};
+ }
+
+ return std::pair<int, int>(sortedRefreshRates[0]->configId,
+ sortedRefreshRates[1]->configId);
+ };
+
+ auto defaultAndPerfConfigs = findDefaultAndPerfConfigs();
+ if (!defaultAndPerfConfigs) {
+ mRefreshRateSwitchingSupported = false;
+ return;
+ }
+
+ mRefreshRateMap[RefreshRateType::DEFAULT] = mRefreshRates[defaultAndPerfConfigs->first];
+ mRefreshRateMap[RefreshRateType::PERFORMANCE] = mRefreshRates[defaultAndPerfConfigs->second];
+}
+
+} // namespace android::scheduler
\ No newline at end of file
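
The findDefaultAndPerfConfigs lambda above pairs the two refresh-rate types by sorting configs on vsync period in descending order, so the slowest config becomes DEFAULT and the next faster one PERFORMANCE; with fewer than two configs, or a zero vsync period, switching stays disabled. A standalone sketch of that selection follows; the Config struct and pickDefaultAndPerf name are assumed for illustration and are not the SurfaceFlinger types.

#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using nsecs_t = int64_t;

struct Config {
    int id;
    nsecs_t vsyncPeriod;
};

// Returns {defaultConfigId, performanceConfigId}, or nullopt if switching
// cannot be supported (fewer than two configs, or a zero vsync period).
std::optional<std::pair<int, int>> pickDefaultAndPerf(std::vector<Config> configs) {
    if (configs.size() < 2) return std::nullopt;
    // Longest vsync period first: index 0 is the slowest (DEFAULT) config,
    // index 1 the next faster one (PERFORMANCE).
    std::sort(configs.begin(), configs.end(),
              [](const Config& a, const Config& b) { return a.vsyncPeriod > b.vsyncPeriod; });
    if (configs[0].vsyncPeriod == 0 || configs[1].vsyncPeriod == 0) return std::nullopt;
    return std::make_pair(configs[0].id, configs[1].id);
}

// Usage: a 60 Hz / 90 Hz panel yields {id of the 60 Hz config, id of the 90 Hz config}.
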
diff --git a/services/surfaceflinger/Scheduler/RefreshRateConfigs.h b/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
index 2fd100f..90bba24 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
+++ b/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
@@ -66,55 +66,17 @@
// from multiple threads. This can only be called if refreshRateSwitching() returns true.
// TODO(b/122916473): Get this information from configs prepared by vendors, instead of
// baking them in.
- const std::map<RefreshRateType, RefreshRate>& getRefreshRateMap() const {
- LOG_ALWAYS_FATAL_IF(!mRefreshRateSwitchingSupported);
- return mRefreshRateMap;
- }
+ const std::map<RefreshRateType, RefreshRate>& getRefreshRateMap() const;
- const RefreshRate& getRefreshRateFromType(RefreshRateType type) const {
- if (!mRefreshRateSwitchingSupported) {
- return getCurrentRefreshRate().second;
- } else {
- auto refreshRate = mRefreshRateMap.find(type);
- LOG_ALWAYS_FATAL_IF(refreshRate == mRefreshRateMap.end());
- return refreshRate->second;
- }
- }
+ const RefreshRate& getRefreshRateFromType(RefreshRateType type) const;
- std::pair<RefreshRateType, const RefreshRate&> getCurrentRefreshRate() const {
- int currentConfig = mCurrentConfig;
- if (mRefreshRateSwitchingSupported) {
- for (const auto& [type, refresh] : mRefreshRateMap) {
- if (refresh.configId == currentConfig) {
- return {type, refresh};
- }
- }
- LOG_ALWAYS_FATAL();
- }
- return {RefreshRateType::DEFAULT, mRefreshRates[currentConfig]};
- }
+ std::pair<RefreshRateType, const RefreshRate&> getCurrentRefreshRate() const;
- const RefreshRate& getRefreshRateFromConfigId(int configId) const {
- LOG_ALWAYS_FATAL_IF(configId >= mRefreshRates.size());
- return mRefreshRates[configId];
- }
+ const RefreshRate& getRefreshRateFromConfigId(int configId) const;
- RefreshRateType getRefreshRateTypeFromHwcConfigId(hwc2_config_t hwcId) const {
- if (!mRefreshRateSwitchingSupported) return RefreshRateType::DEFAULT;
+ RefreshRateType getRefreshRateTypeFromHwcConfigId(hwc2_config_t hwcId) const;
- for (const auto& [type, refreshRate] : mRefreshRateMap) {
- if (refreshRate.hwcId == hwcId) {
- return type;
- }
- }
-
- return RefreshRateType::DEFAULT;
- }
-
- void setCurrentConfig(int config) {
- LOG_ALWAYS_FATAL_IF(config >= mRefreshRates.size());
- mCurrentConfig = config;
- }
+ void setCurrentConfig(int config);
struct InputConfig {
hwc2_config_t hwcId = 0;
@@ -122,78 +84,15 @@
};
RefreshRateConfigs(bool refreshRateSwitching, const std::vector<InputConfig>& configs,
- int currentConfig) {
- init(refreshRateSwitching, configs, currentConfig);
- }
+ int currentConfig);
RefreshRateConfigs(bool refreshRateSwitching,
const std::vector<std::shared_ptr<const HWC2::Display::Config>>& configs,
- int currentConfig) {
- std::vector<InputConfig> inputConfigs;
- for (const auto& config : configs) {
- inputConfigs.push_back({config->getId(), config->getVsyncPeriod()});
- }
- init(refreshRateSwitching, inputConfigs, currentConfig);
- }
+ int currentConfig);
private:
void init(bool refreshRateSwitching, const std::vector<InputConfig>& configs,
- int currentConfig) {
- mRefreshRateSwitchingSupported = refreshRateSwitching;
- LOG_ALWAYS_FATAL_IF(configs.empty());
- LOG_ALWAYS_FATAL_IF(currentConfig >= configs.size());
- mCurrentConfig = currentConfig;
-
- auto buildRefreshRate = [&](int configId) -> RefreshRate {
- const nsecs_t vsyncPeriod = configs[configId].vsyncPeriod;
- const float fps = 1e9 / vsyncPeriod;
- return {configId, base::StringPrintf("%2.ffps", fps), static_cast<uint32_t>(fps),
- vsyncPeriod, configs[configId].hwcId};
- };
-
- for (int i = 0; i < configs.size(); ++i) {
- mRefreshRates.push_back(buildRefreshRate(i));
- }
-
- if (!mRefreshRateSwitchingSupported) return;
-
- auto findDefaultAndPerfConfigs = [&]() -> std::optional<std::pair<int, int>> {
- if (configs.size() < 2) {
- return {};
- }
-
- std::vector<const RefreshRate*> sortedRefreshRates;
- for (const auto& refreshRate : mRefreshRates) {
- sortedRefreshRates.push_back(&refreshRate);
- }
- std::sort(sortedRefreshRates.begin(), sortedRefreshRates.end(),
- [](const RefreshRate* refreshRate1, const RefreshRate* refreshRate2) {
- return refreshRate1->vsyncPeriod > refreshRate2->vsyncPeriod;
- });
-
- // When the configs are ordered by the resync rate, we assume that
- // the first one is DEFAULT and the second one is PERFORMANCE,
- // i.e. the higher rate.
- if (sortedRefreshRates[0]->vsyncPeriod == 0 ||
- sortedRefreshRates[1]->vsyncPeriod == 0) {
- return {};
- }
-
- return std::pair<int, int>(sortedRefreshRates[0]->configId,
- sortedRefreshRates[1]->configId);
- };
-
- auto defaultAndPerfConfigs = findDefaultAndPerfConfigs();
- if (!defaultAndPerfConfigs) {
- mRefreshRateSwitchingSupported = false;
- return;
- }
-
- mRefreshRateMap[RefreshRateType::DEFAULT] = mRefreshRates[defaultAndPerfConfigs->first];
- mRefreshRateMap[RefreshRateType::PERFORMANCE] =
- mRefreshRates[defaultAndPerfConfigs->second];
- }
-
+ int currentConfig);
// Whether this device is doing refresh rate switching or not. This must not change after this
// object is initialized.
bool mRefreshRateSwitchingSupported;
diff --git a/services/surfaceflinger/Scheduler/Timer.cpp b/services/surfaceflinger/Scheduler/Timer.cpp
new file mode 100644
index 0000000..2394ed2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "SchedulerTimer"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include <log/log.h>
+#include <sys/epoll.h>
+#include <sys/timerfd.h>
+#include <sys/unistd.h>
+#include <utils/Trace.h>
+#include <chrono>
+#include <cstdint>
+
+#include "Timer.h"
+
+namespace android::scheduler {
+
+static constexpr size_t kReadPipe = 0;
+static constexpr size_t kWritePipe = 1;
+
+template <class T, size_t N>
+constexpr size_t arrayLen(T (&)[N]) {
+ return N;
+}
+
+Timer::Timer()
+ : mTimerFd(timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK)),
+ mEpollFd(epoll_create1(EPOLL_CLOEXEC)) {
+ if (pipe2(mPipes.data(), O_CLOEXEC | O_NONBLOCK)) {
+ ALOGE("could not create TimerDispatch mPipes");
+ };
+
+ mDispatchThread = std::thread(std::bind(&Timer::dispatch, this));
+}
+
+Timer::~Timer() {
+ endDispatch();
+ mDispatchThread.join();
+
+ close(mPipes[kWritePipe]);
+ close(mPipes[kReadPipe]);
+ close(mEpollFd);
+ close(mTimerFd);
+}
+
+void Timer::endDispatch() {
+ static constexpr unsigned char end = 'e';
+ write(mPipes[kWritePipe], &end, sizeof(end));
+}
+
+nsecs_t Timer::now() const {
+ return systemTime(SYSTEM_TIME_MONOTONIC);
+}
+
+constexpr char const* timerTraceTag = "AlarmInNs";
+void Timer::alarmIn(std::function<void()> const& cb, nsecs_t fireIn) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, fireIn);
+
+ using namespace std::literals;
+ static constexpr int ns_per_s =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(1s).count();
+
+ mCallback = cb;
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {.tv_sec = static_cast<long>(fireIn / ns_per_s),
+ .tv_nsec = static_cast<long>(fireIn % ns_per_s)},
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to set timerfd");
+ }
+}
+
+void Timer::alarmCancel() {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ ATRACE_INT64(timerTraceTag, 0);
+
+ struct itimerspec old_timer;
+ struct itimerspec new_timer {
+ .it_interval = {.tv_sec = 0, .tv_nsec = 0},
+ .it_value = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ },
+ };
+
+ if (timerfd_settime(mTimerFd, 0, &new_timer, &old_timer)) {
+ ALOGW("Failed to disarm timerfd");
+ }
+}
+
+void Timer::dispatch() {
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
+ ALOGW("Failed to set SCHED_FIFO on dispatch thread");
+ }
+
+ if (pthread_setname_np(pthread_self(), "TimerDispatch")) {
+ ALOGW("Failed to set thread name on dispatch thread");
+ }
+
+ enum DispatchType : uint32_t { TIMER, TERMINATE, MAX_DISPATCH_TYPE };
+ epoll_event timerEvent;
+ timerEvent.events = EPOLLIN;
+ timerEvent.data.u32 = DispatchType::TIMER;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mTimerFd, &timerEvent) == -1) {
+ ALOGE("Error adding timer fd to epoll dispatch loop");
+ return;
+ }
+
+ epoll_event terminateEvent;
+ terminateEvent.events = EPOLLIN;
+ terminateEvent.data.u32 = DispatchType::TERMINATE;
+ if (epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mPipes[kReadPipe], &terminateEvent) == -1) {
+ ALOGE("Error adding control fd to dispatch loop");
+ return;
+ }
+
+ uint64_t iteration = 0;
+ char const traceNamePrefix[] = "TimerIteration #";
+ static constexpr size_t max64print = std::numeric_limits<decltype(iteration)>::digits10;
+ static constexpr size_t maxlen = arrayLen(traceNamePrefix) + max64print;
+ std::array<char, maxlen> str_buffer;
+ auto timing = true;
+ while (timing) {
+ epoll_event events[DispatchType::MAX_DISPATCH_TYPE];
+ int nfds = epoll_wait(mEpollFd, events, DispatchType::MAX_DISPATCH_TYPE, -1);
+
+ if (ATRACE_ENABLED()) {
+ snprintf(str_buffer.data(), str_buffer.size(), "%s%" PRIu64, traceNamePrefix,
+ iteration++);
+ ATRACE_NAME(str_buffer.data());
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ timing = false;
+ continue;
+ }
+ }
+
+ for (auto i = 0; i < nfds; i++) {
+ if (events[i].data.u32 == DispatchType::TIMER) {
+ static uint64_t mIgnored = 0;
+ read(mTimerFd, &mIgnored, sizeof(mIgnored));
+ std::function<void()> cb;
+ {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ cb = mCallback;
+ }
+ if (cb) {
+ cb();
+ }
+ }
+ if (events[i].data.u32 == DispatchType::TERMINATE) {
+ timing = false;
+ }
+ }
+ }
+}
+
+} // namespace android::scheduler
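
Timer::alarmIn() above arms a one-shot CLOCK_MONOTONIC timerfd by splitting the relative delay into whole seconds and remaining nanoseconds for itimerspec, and the dispatch thread then picks the expiry up through epoll. Here is a minimal Linux-only sketch of just the arming step, with the epoll loop omitted and error handling reduced to a bool; armOneShot and kNsPerSec are illustrative names.

#include <cstdint>
#include <ctime>
#include <sys/timerfd.h>

using nsecs_t = int64_t;
constexpr nsecs_t kNsPerSec = 1'000'000'000;

// Arms timerFd to fire once, fireIn nanoseconds from now (relative timeout).
bool armOneShot(int timerFd, nsecs_t fireIn) {
    itimerspec spec{};
    spec.it_interval = {0, 0};  // one-shot: no repeat interval
    spec.it_value.tv_sec = static_cast<time_t>(fireIn / kNsPerSec);
    spec.it_value.tv_nsec = static_cast<long>(fireIn % kNsPerSec);
    return timerfd_settime(timerFd, 0, &spec, nullptr) == 0;
}

// Usage sketch: create the fd once with
//   timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK),
// arm it with armOneShot(fd, 16'000'000) for ~16 ms, wait for it to become
// readable via poll/epoll, then read() the 8-byte expiration count.
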
diff --git a/services/surfaceflinger/Scheduler/Timer.h b/services/surfaceflinger/Scheduler/Timer.h
new file mode 100644
index 0000000..0ae82c8
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/Timer.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "TimeKeeper.h"
+
+#include <android-base/thread_annotations.h>
+#include <array>
+#include <thread>
+
+namespace android::scheduler {
+
+class Timer : public TimeKeeper {
+public:
+ Timer();
+ ~Timer();
+ nsecs_t now() const final;
+
+ // NB: alarmIn and alarmCancel are threadsafe, with the last call to return taking effect.
+ // Most users will want to serialize these calls so as to be aware of the timer state.
+ void alarmIn(std::function<void()> const& cb, nsecs_t fireIn) final;
+ void alarmCancel() final;
+
+private:
+ int const mTimerFd;
+ int const mEpollFd;
+ std::array<int, 2> mPipes;
+
+ std::thread mDispatchThread;
+ void dispatch();
+ void endDispatch();
+
+ std::mutex mMutex;
+ std::function<void()> mCallback GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
new file mode 100644
index 0000000..643c5d2
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+//#define LOG_NDEBUG 0
+#include "VSyncPredictor.h"
+#include <android-base/logging.h>
+#include <cutils/compiler.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <algorithm>
+#include <chrono>
+#include "SchedulerUtils.h"
+
+namespace android::scheduler {
+static auto constexpr kNeedsSamplesTag = "SamplesRequested";
+static auto constexpr kMaxPercent = 100u;
+
+VSyncPredictor::~VSyncPredictor() = default;
+
+VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
+ size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
+ : kHistorySize(historySize),
+ kMinimumSamplesForPrediction(minimumSamplesForPrediction),
+ kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
+ mIdealPeriod(idealPeriod) {
+ mRateMap[mIdealPeriod] = {idealPeriod, 0};
+}
+
+inline size_t VSyncPredictor::next(int i) const {
+ return (i + 1) % timestamps.size();
+}
+
+bool VSyncPredictor::validate(nsecs_t timestamp) const {
+ if (lastTimestampIndex < 0 || timestamps.empty()) {
+ return true;
+ }
+
+ auto const aValidTimestamp = timestamps[lastTimestampIndex];
+ auto const percent = (timestamp - aValidTimestamp) % mIdealPeriod * kMaxPercent / mIdealPeriod;
+ return percent < kOutlierTolerancePercent || percent > (kMaxPercent - kOutlierTolerancePercent);
+}
+
+void VSyncPredictor::addVsyncTimestamp(nsecs_t timestamp) {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ if (!validate(timestamp)) {
+ ALOGW("timestamp was too far off the last known timestamp");
+ return;
+ }
+
+ if (timestamps.size() != kHistorySize) {
+ timestamps.push_back(timestamp);
+ lastTimestampIndex = next(lastTimestampIndex);
+ } else {
+ lastTimestampIndex = next(lastTimestampIndex);
+ timestamps[lastTimestampIndex] = timestamp;
+ }
+
+ if (timestamps.size() < kMinimumSamplesForPrediction) {
+ mRateMap[mIdealPeriod] = {mIdealPeriod, 0};
+ return;
+ }
+
+ // This is a 'simple linear regression' calculation of Y over X, with Y being the
+ // vsync timestamps, and X being the ordinal of vsync count.
+ // The calculated slope is the vsync period.
+ // Formula for reference:
+ // Sigma_i: means sum over all timestamps.
+ // mean(variable): statistical mean of variable.
+ // X: snapped ordinal of the timestamp
+ // Y: vsync timestamp
+ //
+ // Sigma_i( (X_i - mean(X)) * (Y_i - mean(Y) )
+ // slope = -------------------------------------------
+ // Sigma_i ( X_i - mean(X) ) ^ 2
+ //
+ // intercept = mean(Y) - slope * mean(X)
+ //
+ std::vector<nsecs_t> vsyncTS(timestamps.size());
+ std::vector<nsecs_t> ordinals(timestamps.size());
+
+ // normalizing to the oldest timestamp cuts down on error in calculating the intercept.
+ auto const oldest_ts = *std::min_element(timestamps.begin(), timestamps.end());
+ auto it = mRateMap.find(mIdealPeriod);
+ auto const currentPeriod = std::get<0>(it->second);
+ // TODO (b/144707443): it's important that there's some precision in the mean of the ordinals
+ // for the intercept calculation, so scale the ordinals by 10 to continue
+ // fixed point calculation. Explore expanding
+ // scheduler::utils::calculate_mean to have a fixed point fractional part.
+ static constexpr int kScalingFactor = 10;
+
+ for (auto i = 0u; i < timestamps.size(); i++) {
+ vsyncTS[i] = timestamps[i] - oldest_ts;
+ ordinals[i] = ((vsyncTS[i] + (currentPeriod / 2)) / currentPeriod) * kScalingFactor;
+ }
+
+ auto meanTS = scheduler::calculate_mean(vsyncTS);
+ auto meanOrdinal = scheduler::calculate_mean(ordinals);
+ for (auto i = 0; i < vsyncTS.size(); i++) {
+ vsyncTS[i] -= meanTS;
+ ordinals[i] -= meanOrdinal;
+ }
+
+ auto top = 0ll;
+ auto bottom = 0ll;
+ for (auto i = 0; i < vsyncTS.size(); i++) {
+ top += vsyncTS[i] * ordinals[i];
+ bottom += ordinals[i] * ordinals[i];
+ }
+
+ if (CC_UNLIKELY(bottom == 0)) {
+ it->second = {mIdealPeriod, 0};
+ return;
+ }
+
+ nsecs_t const anticipatedPeriod = top / bottom * kScalingFactor;
+ nsecs_t const intercept = meanTS - (anticipatedPeriod * meanOrdinal / kScalingFactor);
+
+ it->second = {anticipatedPeriod, intercept};
+
+ ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
+ anticipatedPeriod, intercept);
+}
+
+nsecs_t VSyncPredictor::nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const {
+ std::lock_guard<std::mutex> lk(mMutex);
+
+ auto const [slope, intercept] = getVSyncPredictionModel(lk);
+
+ if (timestamps.empty()) {
+ auto const knownTimestamp = mKnownTimestamp ? *mKnownTimestamp : timePoint;
+ auto const numPeriodsOut = ((timePoint - knownTimestamp) / mIdealPeriod) + 1;
+ return knownTimestamp + numPeriodsOut * mIdealPeriod;
+ }
+
+ auto const oldest = *std::min_element(timestamps.begin(), timestamps.end());
+ auto const ordinalRequest = (timePoint - oldest + slope) / slope;
+ auto const prediction = (ordinalRequest * slope) + intercept + oldest;
+
+ ALOGV("prediction made from: %" PRId64 " prediction: %" PRId64 " (+%" PRId64 ") slope: %" PRId64
+ " intercept: %" PRId64,
+ timePoint, prediction, prediction - timePoint, slope, intercept);
+ return prediction;
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel() const {
+ std::lock_guard<std::mutex> lk(mMutex);
+ return VSyncPredictor::getVSyncPredictionModel(lk);
+}
+
+std::tuple<nsecs_t, nsecs_t> VSyncPredictor::getVSyncPredictionModel(
+ std::lock_guard<std::mutex> const&) const {
+ return mRateMap.find(mIdealPeriod)->second;
+}
+
+void VSyncPredictor::setPeriod(nsecs_t period) {
+ ATRACE_CALL();
+
+ std::lock_guard<std::mutex> lk(mMutex);
+ static constexpr size_t kSizeLimit = 30;
+ if (CC_UNLIKELY(mRateMap.size() == kSizeLimit)) {
+ mRateMap.erase(mRateMap.begin());
+ }
+
+ mIdealPeriod = period;
+ if (mRateMap.find(period) == mRateMap.end()) {
+ mRateMap[mIdealPeriod] = {period, 0};
+ }
+
+ if (!timestamps.empty()) {
+ mKnownTimestamp = *std::max_element(timestamps.begin(), timestamps.end());
+ timestamps.clear();
+ lastTimestampIndex = 0;
+ }
+}
+
+bool VSyncPredictor::needsMoreSamples(nsecs_t now) const {
+ using namespace std::literals::chrono_literals;
+ std::lock_guard<std::mutex> lk(mMutex);
+ bool needsMoreSamples = true;
+ if (timestamps.size() >= kMinimumSamplesForPrediction) {
+ nsecs_t constexpr aLongTime =
+ std::chrono::duration_cast<std::chrono::nanoseconds>(500ms).count();
+ if (!(lastTimestampIndex < 0 || timestamps.empty())) {
+ auto const lastTimestamp = timestamps[lastTimestampIndex];
+ needsMoreSamples = !((lastTimestamp + aLongTime) > now);
+ }
+ }
+
+ ATRACE_INT(kNeedsSamplesTag, needsMoreSamples);
+ return needsMoreSamples;
+}
+
+} // namespace android::scheduler
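
The regression in addVsyncTimestamp() above fits the vsync timestamps (Y) against their snapped ordinals (X): the least-squares slope is the measured vsync period and the intercept is the phase, with ordinals scaled by 10 to preserve some precision in the fixed-point math. The following is a small floating-point sketch of the same formula, using double arithmetic instead of the scaled integer calculation above; fitPeriodAndPhase is an illustrative name.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using nsecs_t = int64_t;

// Least-squares fit of vsync timestamps against their vsync ordinal:
//   slope     = sum((x - mean(x)) * (y - mean(y))) / sum((x - mean(x))^2)
//   intercept = mean(y) - slope * mean(x)
// Timestamps are normalized to the oldest sample to keep the numbers small,
// and each sample's ordinal is its offset snapped to the nearest period.
std::pair<nsecs_t, nsecs_t> fitPeriodAndPhase(const std::vector<nsecs_t>& timestamps,
                                              nsecs_t currentPeriod) {
    if (timestamps.empty() || currentPeriod <= 0) return {currentPeriod, 0};

    const nsecs_t oldest = *std::min_element(timestamps.begin(), timestamps.end());
    std::vector<double> xs, ys;
    double meanX = 0, meanY = 0;
    for (nsecs_t ts : timestamps) {
        const double y = static_cast<double>(ts - oldest);
        const double x = std::round(y / static_cast<double>(currentPeriod));  // snapped ordinal
        xs.push_back(x);
        ys.push_back(y);
        meanX += x;
        meanY += y;
    }
    meanX /= xs.size();
    meanY /= ys.size();

    double top = 0, bottom = 0;
    for (size_t i = 0; i < xs.size(); ++i) {
        top += (xs[i] - meanX) * (ys[i] - meanY);
        bottom += (xs[i] - meanX) * (xs[i] - meanX);
    }
    if (bottom == 0) return {currentPeriod, 0};  // degenerate: fall back to the known period

    const double slope = top / bottom;               // fitted vsync period
    const double intercept = meanY - slope * meanX;  // fitted phase offset
    return {static_cast<nsecs_t>(slope), static_cast<nsecs_t>(intercept)};
}
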
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
new file mode 100644
index 0000000..1590f49
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+#include "VSyncTracker.h"
+
+namespace android::scheduler {
+
+class VSyncPredictor : public VSyncTracker {
+public:
+ /*
+ * \param [in] idealPeriod The initial ideal period to use.
+ * \param [in] historySize The number of entries to store in the model.
+ * \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
+ * predicting.
+ * \param [in] outlierTolerancePercent A number 0 to 100 used to filter out samples that fall
+ * more than outlierTolerancePercent away from an anticipated vsync event.
+ */
+ VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
+ uint32_t outlierTolerancePercent);
+ ~VSyncPredictor();
+
+ void addVsyncTimestamp(nsecs_t timestamp) final;
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final;
+
+ /*
+ * Inform the model that the period is anticipated to change to a new value.
+ * The model will use the period parameter to predict vsync events until enough
+ * timestamps with the new period have been collected.
+ *
+ * \param [in] period The new period that should be used.
+ */
+ void setPeriod(nsecs_t period);
+
+ /* Query if the model is in need of more samples to make a prediction at timePoint.
+ * \param [in] timePoint The timePoint to inquire of.
+ * \return True if the model would benefit from more samples, false if not.
+ */
+ bool needsMoreSamples(nsecs_t timePoint) const;
+
+ std::tuple<nsecs_t /* slope */, nsecs_t /* intercept */> getVSyncPredictionModel() const;
+
+private:
+ VSyncPredictor(VSyncPredictor const&) = delete;
+ VSyncPredictor& operator=(VSyncPredictor const&) = delete;
+
+ size_t const kHistorySize;
+ size_t const kMinimumSamplesForPrediction;
+ size_t const kOutlierTolerancePercent;
+
+ std::mutex mutable mMutex;
+ size_t next(int i) const REQUIRES(mMutex);
+ bool validate(nsecs_t timestamp) const REQUIRES(mMutex);
+ std::tuple<nsecs_t, nsecs_t> getVSyncPredictionModel(std::lock_guard<std::mutex> const&) const
+ REQUIRES(mMutex);
+
+ nsecs_t mIdealPeriod GUARDED_BY(mMutex);
+ std::optional<nsecs_t> mKnownTimestamp GUARDED_BY(mMutex);
+
+ std::unordered_map<nsecs_t, std::tuple<nsecs_t, nsecs_t>> mutable mRateMap GUARDED_BY(mMutex);
+
+ int lastTimestampIndex GUARDED_BY(mMutex) = 0;
+ std::vector<nsecs_t> timestamps GUARDED_BY(mMutex);
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index ec15bad..bf3b4c9 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1363,9 +1363,12 @@
}
void SurfaceFlinger::onVsyncReceived(int32_t sequenceId, hwc2_display_t hwcDisplayId,
- int64_t timestamp) {
+ int64_t timestamp,
+ std::optional<hwc2_vsync_period_t> /*vsyncPeriod*/) {
ATRACE_NAME("SF onVsync");
+ // TODO(b/140201379): use vsyncPeriod in the new DispSync
+
Mutex::Autolock lock(mStateLock);
// Ignore any vsyncs from a previous hardware composer.
if (sequenceId != getBE().mComposerSequenceId) {
@@ -1442,6 +1445,12 @@
setTransactionFlags(eDisplayTransactionNeeded);
}
+void SurfaceFlinger::onVsyncPeriodTimingChangedReceived(
+ int32_t /*sequenceId*/, hwc2_display_t /*display*/,
+ const hwc_vsync_period_change_timeline_t& /*updatedTimeline*/) {
+ // TODO(b/142753004): use timeline when changing refresh rate
+}
+
void SurfaceFlinger::onRefreshReceived(int sequenceId, hwc2_display_t /*hwcDisplayId*/) {
Mutex::Autolock lock(mStateLock);
if (sequenceId != getBE().mComposerSequenceId) {
@@ -1601,6 +1610,7 @@
ATRACE_CALL();
switch (what) {
case MessageQueue::INVALIDATE: {
+ const nsecs_t frameStart = systemTime();
// calculate the expected present time once and use the cached
// value throughout this frame to make sure all layers are
// seeing this same value.
@@ -1665,6 +1675,13 @@
// Signal a refresh if a transaction modified the window state,
// a new buffer was latched, or if HWC has requested a full
// repaint
+ if (mFrameStartTime <= 0) {
+ // We should only use the time of the first invalidate
+ // message that signals a refresh as the beginning of the
+ // frame. Otherwise the real frame time will be
+ // underestimated.
+ mFrameStartTime = frameStart;
+ }
signalRefresh();
}
break;
@@ -1743,6 +1760,9 @@
mGeometryInvalid = false;
mCompositionEngine->present(refreshArgs);
+ mTimeStats->recordFrameDuration(mFrameStartTime, systemTime());
+ // Reset the frame start time now that we've recorded this frame.
+ mFrameStartTime = 0;
postFrame();
postComposition();
@@ -1896,9 +1916,8 @@
}
mDrawingState.traverseInZOrder([&](Layer* layer) {
- bool frameLatched =
- layer->onPostComposition(displayDevice->getId(), glCompositionDoneFenceTime,
- presentFenceTime, compositorTiming);
+ bool frameLatched = layer->onPostComposition(displayDevice, glCompositionDoneFenceTime,
+ presentFenceTime, compositorTiming);
if (frameLatched) {
recordBufferingStats(layer->getName(), layer->getOccupancyHistory(false));
}
@@ -3714,6 +3733,8 @@
if (currentMode == HWC_POWER_MODE_OFF) {
// Turn on the display
+ // TODO: @vhau temp fix only! See b/141111965
+ mTransactionCompletedThread.clearAllPending();
getHwComposer().setPowerMode(*displayId, mode);
if (display->isPrimary() && mode != HWC_POWER_MODE_DOZE_SUSPEND) {
setVsyncEnabledInHWC(*displayId, mHWCVsyncPendingState);
@@ -4384,7 +4405,8 @@
case GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES:
case SET_DISPLAY_CONTENT_SAMPLING_ENABLED:
case GET_DISPLAYED_CONTENT_SAMPLE:
- case NOTIFY_POWER_HINT: {
+ case NOTIFY_POWER_HINT:
+ case SET_GLOBAL_SHADOW_SETTINGS: {
if (!callingThreadHasUnscopedSurfaceFlingerAccess()) {
IPCThreadState* ipc = IPCThreadState::self();
ALOGE("Permission Denial: can't access SurfaceFlinger pid=%d, uid=%d",
@@ -5501,6 +5523,12 @@
getRenderEngine().unbindExternalTextureBuffer(clientCacheId.id);
}
+status_t SurfaceFlinger::setGlobalShadowSettings(const half4& /*ambientColor*/,
+ const half4& /*spotColor*/, float /*lightPosY*/,
+ float /*lightPosZ*/, float /*lightRadius*/) {
+ return NO_ERROR;
+}
+
} // namespace android
#if defined(__gl_h_)
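
The SurfaceFlinger.cpp changes above bracket a frame for TimeStats: the first INVALIDATE that actually signals a refresh latches mFrameStartTime, present() reports the [start, now) interval through recordFrameDuration(), and the start time is reset so the next frame begins a new measurement. A toy sketch of that bookkeeping follows; FrameDurationRecorder and its callback are hypothetical names, not the SurfaceFlinger API.

#include <cstdint>
#include <functional>
#include <utility>

using nsecs_t = int64_t;

// Illustrative frame-duration bookkeeping, mirroring the pattern above.
class FrameDurationRecorder {
public:
    explicit FrameDurationRecorder(std::function<void(nsecs_t, nsecs_t)> record)
          : mRecord(std::move(record)) {}

    // Called on every invalidate that will lead to a refresh. Only the first
    // one per frame sets the start time; otherwise the real frame time would
    // be underestimated.
    void onInvalidate(nsecs_t now) {
        if (mFrameStartTime <= 0) mFrameStartTime = now;
    }

    // Called once composition for the frame has been submitted.
    void onPresent(nsecs_t now) {
        mRecord(mFrameStartTime, now);
        mFrameStartTime = 0;  // reset for the next frame
    }

private:
    std::function<void(nsecs_t, nsecs_t)> mRecord;
    nsecs_t mFrameStartTime = 0;
};
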
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 50b3ae4..8e1199c 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -470,7 +470,8 @@
bool* outSupport) const override;
status_t setDisplayBrightness(const sp<IBinder>& displayToken, float brightness) const override;
status_t notifyPowerHint(int32_t hintId) override;
-
+ status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
+ float lightPosY, float lightPosZ, float lightRadius) override;
/* ------------------------------------------------------------------------
* DeathRecipient interface
*/
@@ -484,11 +485,14 @@
/* ------------------------------------------------------------------------
* HWC2::ComposerCallback / HWComposer::EventHandler interface
*/
- void onVsyncReceived(int32_t sequenceId, hwc2_display_t hwcDisplayId,
- int64_t timestamp) override;
+ void onVsyncReceived(int32_t sequenceId, hwc2_display_t hwcDisplayId, int64_t timestamp,
+ std::optional<hwc2_vsync_period_t> vsyncPeriod) override;
void onHotplugReceived(int32_t sequenceId, hwc2_display_t hwcDisplayId,
HWC2::Connection connection) override;
void onRefreshReceived(int32_t sequenceId, hwc2_display_t hwcDisplayId) override;
+ void onVsyncPeriodTimingChangedReceived(
+ int32_t sequenceId, hwc2_display_t display,
+ const hwc_vsync_period_change_timeline_t& updatedTimeline) override;
/* ------------------------------------------------------------------------
* Message handling
@@ -1148,6 +1152,9 @@
bool mPendingSyncInputWindows GUARDED_BY(mStateLock);
Hwc2::impl::PowerAdvisor mPowerAdvisor;
+ // This should only be accessed on the main thread.
+ nsecs_t mFrameStartTime = 0;
+
std::unique_ptr<RefreshRateOverlay> mRefreshRateOverlay;
// Flag used to set override allowed display configs from backdoor
diff --git a/services/surfaceflinger/TimeStats/TimeStats.cpp b/services/surfaceflinger/TimeStats/TimeStats.cpp
index 3e47ec6..626efb8 100644
--- a/services/surfaceflinger/TimeStats/TimeStats.cpp
+++ b/services/surfaceflinger/TimeStats/TimeStats.cpp
@@ -20,14 +20,13 @@
#include "TimeStats.h"
#include <android-base/stringprintf.h>
-
#include <log/log.h>
-
#include <utils/String8.h>
#include <utils/Timers.h>
#include <utils/Trace.h>
#include <algorithm>
+#include <chrono>
namespace android {
@@ -113,9 +112,26 @@
mTimeStats.clientCompositionFrames++;
}
-bool TimeStats::recordReadyLocked(int32_t layerID, TimeRecord* timeRecord) {
+static int32_t msBetween(nsecs_t start, nsecs_t end) {
+ int64_t delta = std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::nanoseconds(end - start))
+ .count();
+ delta = std::clamp(delta, int64_t(INT32_MIN), int64_t(INT32_MAX));
+ return static_cast<int32_t>(delta);
+}
+
+void TimeStats::recordFrameDuration(nsecs_t startTime, nsecs_t endTime) {
+ if (!mEnabled.load()) return;
+
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (mPowerTime.powerMode == HWC_POWER_MODE_NORMAL) {
+ mTimeStats.frameDuration.insert(msBetween(startTime, endTime));
+ }
+}
+
+bool TimeStats::recordReadyLocked(int32_t layerId, TimeRecord* timeRecord) {
if (!timeRecord->ready) {
- ALOGV("[%d]-[%" PRIu64 "]-presentFence is still not received", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-presentFence is still not received", layerId,
timeRecord->frameTime.frameNumber);
return false;
}
@@ -128,7 +144,7 @@
timeRecord->frameTime.acquireTime = timeRecord->acquireFence->getSignalTime();
timeRecord->acquireFence = nullptr;
} else {
- ALOGV("[%d]-[%" PRIu64 "]-acquireFence signal time is invalid", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-acquireFence signal time is invalid", layerId,
timeRecord->frameTime.frameNumber);
}
}
@@ -141,7 +157,7 @@
timeRecord->frameTime.presentTime = timeRecord->presentFence->getSignalTime();
timeRecord->presentFence = nullptr;
} else {
- ALOGV("[%d]-[%" PRIu64 "]-presentFence signal time invalid", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-presentFence signal time invalid", layerId,
timeRecord->frameTime.frameNumber);
}
}
@@ -149,21 +165,15 @@
return true;
}
-static int32_t msBetween(nsecs_t start, nsecs_t end) {
- int64_t delta = (end - start) / 1000000;
- delta = std::clamp(delta, int64_t(INT32_MIN), int64_t(INT32_MAX));
- return static_cast<int32_t>(delta);
-}
-
-void TimeStats::flushAvailableRecordsToStatsLocked(int32_t layerID) {
+void TimeStats::flushAvailableRecordsToStatsLocked(int32_t layerId) {
ATRACE_CALL();
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
TimeRecord& prevTimeRecord = layerRecord.prevTimeRecord;
std::deque<TimeRecord>& timeRecords = layerRecord.timeRecords;
while (!timeRecords.empty()) {
- if (!recordReadyLocked(layerID, &timeRecords[0])) break;
- ALOGV("[%d]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerID,
+ if (!recordReadyLocked(layerId, &timeRecords[0])) break;
+ ALOGV("[%d]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerId,
timeRecords[0].frameTime.frameNumber, timeRecords[0].frameTime.presentTime);
if (prevTimeRecord.ready) {
@@ -178,37 +188,37 @@
const int32_t postToAcquireMs = msBetween(timeRecords[0].frameTime.postTime,
timeRecords[0].frameTime.acquireTime);
- ALOGV("[%d]-[%" PRIu64 "]-post2acquire[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-post2acquire[%d]", layerId,
timeRecords[0].frameTime.frameNumber, postToAcquireMs);
timeStatsLayer.deltas["post2acquire"].insert(postToAcquireMs);
const int32_t postToPresentMs = msBetween(timeRecords[0].frameTime.postTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-post2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-post2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, postToPresentMs);
timeStatsLayer.deltas["post2present"].insert(postToPresentMs);
const int32_t acquireToPresentMs = msBetween(timeRecords[0].frameTime.acquireTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-acquire2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-acquire2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, acquireToPresentMs);
timeStatsLayer.deltas["acquire2present"].insert(acquireToPresentMs);
const int32_t latchToPresentMs = msBetween(timeRecords[0].frameTime.latchTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-latch2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-latch2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, latchToPresentMs);
timeStatsLayer.deltas["latch2present"].insert(latchToPresentMs);
const int32_t desiredToPresentMs = msBetween(timeRecords[0].frameTime.desiredTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-desired2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-desired2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, desiredToPresentMs);
timeStatsLayer.deltas["desired2present"].insert(desiredToPresentMs);
const int32_t presentToPresentMs = msBetween(prevTimeRecord.frameTime.presentTime,
timeRecords[0].frameTime.presentTime);
- ALOGV("[%d]-[%" PRIu64 "]-present2present[%d]", layerID,
+ ALOGV("[%d]-[%" PRIu64 "]-present2present[%d]", layerId,
timeRecords[0].frameTime.frameNumber, presentToPresentMs);
timeStatsLayer.deltas["present2present"].insert(presentToPresentMs);
}
@@ -227,28 +237,28 @@
layerName.compare(0, kMinLenLayerName, kPopupWindowPrefix) != 0;
}
-void TimeStats::setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+void TimeStats::setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerID, frameNumber, layerName.c_str(),
+ ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerId, frameNumber, layerName.c_str(),
postTime);
std::lock_guard<std::mutex> lock(mMutex);
if (!mTimeStats.stats.count(layerName) && mTimeStats.stats.size() >= MAX_NUM_LAYER_STATS) {
return;
}
- if (!mTimeStatsTracker.count(layerID) && mTimeStatsTracker.size() < MAX_NUM_LAYER_RECORDS &&
+ if (!mTimeStatsTracker.count(layerId) && mTimeStatsTracker.size() < MAX_NUM_LAYER_RECORDS &&
layerNameIsValid(layerName)) {
- mTimeStatsTracker[layerID].layerName = layerName;
+ mTimeStatsTracker[layerId].layerName = layerName;
}
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) {
ALOGE("[%d]-[%s]-timeRecords is at its maximum size[%zu]. Ignore this when unittesting.",
- layerID, layerRecord.layerName.c_str(), MAX_NUM_TIME_RECORDS);
- mTimeStatsTracker.erase(layerID);
+ layerId, layerRecord.layerName.c_str(), MAX_NUM_TIME_RECORDS);
+ mTimeStatsTracker.erase(layerId);
return;
}
// For most media content, the acquireFence is invalid because the buffer is
@@ -270,15 +280,15 @@
layerRecord.waitData = layerRecord.timeRecords.size() - 1;
}
-void TimeStats::setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) {
+void TimeStats::setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerID, frameNumber, latchTime);
+ ALOGV("[%d]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerId, frameNumber, latchTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -288,15 +298,15 @@
}
}
-void TimeStats::setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) {
+void TimeStats::setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerID, frameNumber, desiredTime);
+ ALOGV("[%d]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerId, frameNumber, desiredTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -306,15 +316,15 @@
}
}
-void TimeStats::setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) {
+void TimeStats::setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerID, frameNumber, acquireTime);
+ ALOGV("[%d]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerId, frameNumber, acquireTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -324,17 +334,17 @@
}
}
-void TimeStats::setAcquireFence(int32_t layerID, uint64_t frameNumber,
+void TimeStats::setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerID, frameNumber,
+ ALOGV("[%d]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerId, frameNumber,
acquireFence->getSignalTime());
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -344,15 +354,15 @@
}
}
-void TimeStats::setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) {
+void TimeStats::setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerID, frameNumber, presentTime);
+ ALOGV("[%d]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerId, frameNumber, presentTime);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -363,20 +373,20 @@
layerRecord.waitData++;
}
- flushAvailableRecordsToStatsLocked(layerID);
+ flushAvailableRecordsToStatsLocked(layerId);
}
-void TimeStats::setPresentFence(int32_t layerID, uint64_t frameNumber,
+void TimeStats::setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerID, frameNumber,
+ ALOGV("[%d]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerId, frameNumber,
presentFence->getSignalTime());
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
if (layerRecord.waitData < 0 ||
layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
return;
@@ -387,25 +397,25 @@
layerRecord.waitData++;
}
- flushAvailableRecordsToStatsLocked(layerID);
+ flushAvailableRecordsToStatsLocked(layerId);
}
-void TimeStats::onDestroy(int32_t layerID) {
+void TimeStats::onDestroy(int32_t layerId) {
ATRACE_CALL();
- ALOGV("[%d]-onDestroy", layerID);
+ ALOGV("[%d]-onDestroy", layerId);
std::lock_guard<std::mutex> lock(mMutex);
- mTimeStatsTracker.erase(layerID);
+ mTimeStatsTracker.erase(layerId);
}
-void TimeStats::removeTimeRecord(int32_t layerID, uint64_t frameNumber) {
+void TimeStats::removeTimeRecord(int32_t layerId, uint64_t frameNumber) {
if (!mEnabled.load()) return;
ATRACE_CALL();
- ALOGV("[%d]-[%" PRIu64 "]-removeTimeRecord", layerID, frameNumber);
+ ALOGV("[%d]-[%" PRIu64 "]-removeTimeRecord", layerId, frameNumber);
std::lock_guard<std::mutex> lock(mMutex);
- if (!mTimeStatsTracker.count(layerID)) return;
- LayerRecord& layerRecord = mTimeStatsTracker[layerID];
+ if (!mTimeStatsTracker.count(layerId)) return;
+ LayerRecord& layerRecord = mTimeStatsTracker[layerId];
size_t removeAt = 0;
for (const TimeRecord& record : layerRecord.timeRecords) {
if (record.frameTime.frameNumber == frameNumber) break;
diff --git a/services/surfaceflinger/TimeStats/TimeStats.h b/services/surfaceflinger/TimeStats/TimeStats.h
index 1313132..670bc8e 100644
--- a/services/surfaceflinger/TimeStats/TimeStats.h
+++ b/services/surfaceflinger/TimeStats/TimeStats.h
@@ -44,20 +44,27 @@
virtual void incrementMissedFrames() = 0;
virtual void incrementClientCompositionFrames() = 0;
- virtual void setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+ // Records the start and end times for a frame.
+ // The start time is the same as the beginning of a SurfaceFlinger
+ // invalidate message.
+ // The end time corresponds to when SurfaceFlinger finishes submitting the
+ // request to HWC to present a frame.
+ virtual void recordFrameDuration(nsecs_t startTime, nsecs_t endTime) = 0;
+
+ virtual void setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) = 0;
- virtual void setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) = 0;
- virtual void setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) = 0;
- virtual void setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) = 0;
- virtual void setAcquireFence(int32_t layerID, uint64_t frameNumber,
+ virtual void setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) = 0;
+ virtual void setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) = 0;
+ virtual void setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) = 0;
+ virtual void setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) = 0;
- virtual void setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) = 0;
- virtual void setPresentFence(int32_t layerID, uint64_t frameNumber,
+ virtual void setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) = 0;
+ virtual void setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) = 0;
// Clean up the layer record
- virtual void onDestroy(int32_t layerID) = 0;
+ virtual void onDestroy(int32_t layerId) = 0;
// If SF skips or rejects a buffer, remove the corresponding TimeRecord.
- virtual void removeTimeRecord(int32_t layerID, uint64_t frameNumber) = 0;
+ virtual void removeTimeRecord(int32_t layerId, uint64_t frameNumber) = 0;
virtual void setPowerMode(int32_t powerMode) = 0;
// Source of truth is RefreshRateStats.
@@ -116,20 +123,22 @@
void incrementMissedFrames() override;
void incrementClientCompositionFrames() override;
- void setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName,
+ void recordFrameDuration(nsecs_t startTime, nsecs_t endTime) override;
+
+ void setPostTime(int32_t layerId, uint64_t frameNumber, const std::string& layerName,
nsecs_t postTime) override;
- void setLatchTime(int32_t layerID, uint64_t frameNumber, nsecs_t latchTime) override;
- void setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime) override;
- void setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime) override;
- void setAcquireFence(int32_t layerID, uint64_t frameNumber,
+ void setLatchTime(int32_t layerId, uint64_t frameNumber, nsecs_t latchTime) override;
+ void setDesiredTime(int32_t layerId, uint64_t frameNumber, nsecs_t desiredTime) override;
+ void setAcquireTime(int32_t layerId, uint64_t frameNumber, nsecs_t acquireTime) override;
+ void setAcquireFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& acquireFence) override;
- void setPresentTime(int32_t layerID, uint64_t frameNumber, nsecs_t presentTime) override;
- void setPresentFence(int32_t layerID, uint64_t frameNumber,
+ void setPresentTime(int32_t layerId, uint64_t frameNumber, nsecs_t presentTime) override;
+ void setPresentFence(int32_t layerId, uint64_t frameNumber,
const std::shared_ptr<FenceTime>& presentFence) override;
// Clean up the layer record
- void onDestroy(int32_t layerID) override;
+ void onDestroy(int32_t layerId) override;
// If SF skips or rejects a buffer, remove the corresponding TimeRecord.
- void removeTimeRecord(int32_t layerID, uint64_t frameNumber) override;
+ void removeTimeRecord(int32_t layerId, uint64_t frameNumber) override;
void setPowerMode(int32_t powerMode) override;
// Source of truth is RefreshRateStats.
@@ -139,8 +148,8 @@
static const size_t MAX_NUM_TIME_RECORDS = 64;
private:
- bool recordReadyLocked(int32_t layerID, TimeRecord* timeRecord);
- void flushAvailableRecordsToStatsLocked(int32_t layerID);
+ bool recordReadyLocked(int32_t layerId, TimeRecord* timeRecord);
+ void flushAvailableRecordsToStatsLocked(int32_t layerId);
void flushPowerTimeLocked();
void flushAvailableGlobalRecordsToStatsLocked();
@@ -152,7 +161,7 @@
std::atomic<bool> mEnabled = false;
std::mutex mMutex;
TimeStatsHelper::TimeStatsGlobal mTimeStats;
- // Hashmap for LayerRecord with layerID as the hash key
+ // Hashmap for LayerRecord with layerId as the hash key
std::unordered_map<int32_t, LayerRecord> mTimeStatsTracker;
PowerTime mPowerTime;
GlobalRecord mGlobalRecord;
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
index 16d2da0..83cd45a 100644
--- a/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
+++ b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
@@ -111,6 +111,8 @@
StringAppendF(&result, "totalP2PTime = %" PRId64 " ms\n", presentToPresent.totalTime());
StringAppendF(&result, "presentToPresent histogram is as below:\n");
result.append(presentToPresent.toString());
+ StringAppendF(&result, "frameDuration histogram is as below:\n");
+ result.append(frameDuration.toString());
const auto dumpStats = generateDumpStats(maxLayers);
for (const auto& ele : dumpStats) {
result.append(ele->toString());
@@ -158,6 +160,11 @@
histProto->set_time_millis(histEle.first);
histProto->set_frame_count(histEle.second);
}
+ for (const auto& histEle : frameDuration.hist) {
+ SFTimeStatsHistogramBucketProto* histProto = globalProto.add_frame_duration();
+ histProto->set_time_millis(histEle.first);
+ histProto->set_frame_count(histEle.second);
+ }
const auto dumpStats = generateDumpStats(maxLayers);
for (const auto& ele : dumpStats) {
SFTimeStatsLayerProto* layerProto = globalProto.add_stats();
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
index f2ac7ff..6b28970 100644
--- a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
+++ b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
@@ -61,6 +61,7 @@
int32_t clientCompositionFrames = 0;
int64_t displayOnTime = 0;
Histogram presentToPresent;
+ Histogram frameDuration;
std::unordered_map<std::string, TimeStatsLayer> stats;
std::unordered_map<uint32_t, nsecs_t> refreshRateStats;
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
index 0dacbeb..96430b3 100644
--- a/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
+++ b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
@@ -25,7 +25,7 @@
// changes to these messages, and keep google3 side proto messages in sync if
// the end to end pipeline needs to be updated.
-// Next tag: 10
+// Next tag: 11
message SFTimeStatsGlobalProto {
// The stats start time in UTC as seconds since January 1, 1970
optional int64 stats_start = 1;
@@ -43,6 +43,8 @@
repeated SFTimeStatsDisplayConfigBucketProto display_config_stats = 9;
// Present to present histogram.
repeated SFTimeStatsHistogramBucketProto present_to_present = 8;
+ // Frame CPU duration histogram.
+ repeated SFTimeStatsHistogramBucketProto frame_duration = 10;
// Stats per layer. Apps could have multiple layers.
repeated SFTimeStatsLayerProto stats = 6;
}
diff --git a/services/surfaceflinger/TransactionCompletedThread.cpp b/services/surfaceflinger/TransactionCompletedThread.cpp
index 8db03db..c15355d 100644
--- a/services/surfaceflinger/TransactionCompletedThread.cpp
+++ b/services/surfaceflinger/TransactionCompletedThread.cpp
@@ -189,6 +189,15 @@
return NO_ERROR;
}
+void TransactionCompletedThread::clearAllPending() {
+ std::lock_guard lock(mMutex);
+ if (!mRunning) {
+ return;
+ }
+ mPendingTransactions.clear();
+ mConditionVariable.notify_all();
+}
+
status_t TransactionCompletedThread::registerUnpresentedCallbackHandle(
const sp<CallbackHandle>& handle) {
std::lock_guard lock(mMutex);
diff --git a/services/surfaceflinger/TransactionCompletedThread.h b/services/surfaceflinger/TransactionCompletedThread.h
index 12ea8fe..cd95bfb 100644
--- a/services/surfaceflinger/TransactionCompletedThread.h
+++ b/services/surfaceflinger/TransactionCompletedThread.h
@@ -70,6 +70,8 @@
// Notifies the TransactionCompletedThread that a pending CallbackHandle has been presented.
status_t finalizePendingCallbackHandles(const std::deque<sp<CallbackHandle>>& handles);
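+ // Discards every pending transaction callback handle; the associated callbacks are never sent.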
+ void clearAllPending();
+
// Adds the Transaction CallbackHandle from a layer that does not need to be relatched and
// presented this frame.
status_t registerUnpresentedCallbackHandle(const sp<CallbackHandle>& handle);
diff --git a/services/surfaceflinger/sysprop/SurfaceFlingerProperties.sysprop b/services/surfaceflinger/sysprop/SurfaceFlingerProperties.sysprop
index 51b20cb..049c872 100644
--- a/services/surfaceflinger/sysprop/SurfaceFlingerProperties.sysprop
+++ b/services/surfaceflinger/sysprop/SurfaceFlingerProperties.sysprop
@@ -260,7 +260,7 @@
prop {
api_name: "color_space_agnostic_dataspace"
type: Long
- scope: System
+ scope: Public
access: Readonly
prop_name: "ro.surface_flinger.color_space_agnostic_dataspace"
}
@@ -339,7 +339,7 @@
prop {
api_name: "set_display_power_timer_ms"
type: Integer
- scope: System
+ scope: Public
access: Readonly
prop_name: "ro.surface_flinger.set_display_power_timer_ms"
}
diff --git a/services/surfaceflinger/tests/Android.bp b/services/surfaceflinger/tests/Android.bp
index d021fc2..6b0737c 100644
--- a/services/surfaceflinger/tests/Android.bp
+++ b/services/surfaceflinger/tests/Android.bp
@@ -18,6 +18,7 @@
test_suites: ["device-tests"],
srcs: [
"BufferGenerator.cpp",
+ "CommonTypes_test.cpp",
"Credentials_test.cpp",
"DereferenceSurfaceControl_test.cpp",
"DisplayActiveConfig_test.cpp",
@@ -41,6 +42,8 @@
"libtrace_proto",
],
shared_libs: [
+ "android.hardware.graphics.common@1.2",
+ "android.hardware.graphics.composer@2.1",
"libandroid",
"libbinder",
"libcutils",
@@ -53,6 +56,7 @@
"libtimestats_proto",
"libui",
"libutils",
+ "vintf-graphics-common-ndk_platform",
]
}
diff --git a/services/surfaceflinger/tests/CommonTypes_test.cpp b/services/surfaceflinger/tests/CommonTypes_test.cpp
new file mode 100644
index 0000000..a3e16f9
--- /dev/null
+++ b/services/surfaceflinger/tests/CommonTypes_test.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <aidl/android/hardware/graphics/common/BlendMode.h>
+#include <aidl/android/hardware/graphics/common/Dataspace.h>
+
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/composer/2.1/IComposerClient.h>
+
+using AidlBlendMode = aidl::android::hardware::graphics::common::BlendMode;
+using AidlDataspace = aidl::android::hardware::graphics::common::Dataspace;
+
+using HidlBlendMode = android::hardware::graphics::composer::V2_1::IComposerClient::BlendMode;
+using HidlDataspace = android::hardware::graphics::common::V1_2::Dataspace;
+
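+// Compile-time checks that the AIDL graphics common enum values stay in sync with their HIDL
+// counterparts.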
+static_assert(static_cast<uint32_t>(AidlBlendMode::INVALID) ==
+ static_cast<uint32_t>(HidlBlendMode::INVALID));
+static_assert(static_cast<uint32_t>(AidlBlendMode::NONE) ==
+ static_cast<uint32_t>(HidlBlendMode::NONE));
+static_assert(static_cast<uint32_t>(AidlBlendMode::PREMULTIPLIED) ==
+ static_cast<uint32_t>(HidlBlendMode::PREMULTIPLIED));
+static_assert(static_cast<uint32_t>(AidlBlendMode::COVERAGE) ==
+ static_cast<uint32_t>(HidlBlendMode::COVERAGE));
+
+static_assert(static_cast<uint32_t>(AidlDataspace::UNKNOWN) ==
+ static_cast<uint32_t>(HidlDataspace::UNKNOWN));
+static_assert(static_cast<uint32_t>(AidlDataspace::ARBITRARY) ==
+ static_cast<uint32_t>(HidlDataspace::ARBITRARY));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_SHIFT) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_SHIFT));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_MASK) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_MASK));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_UNSPECIFIED) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_UNSPECIFIED));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT709) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT709));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT601_625) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT601_625));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT601_625_UNADJUSTED) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT601_625_UNADJUSTED));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT601_525) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT601_525));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT601_525_UNADJUSTED) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT601_525_UNADJUSTED));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT2020) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT2020));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT2020_CONSTANT_LUMINANCE) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT2020_CONSTANT_LUMINANCE));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_BT470M) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_BT470M));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_FILM) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_FILM));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_DCI_P3) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_DCI_P3));
+static_assert(static_cast<uint32_t>(AidlDataspace::STANDARD_ADOBE_RGB) ==
+ static_cast<uint32_t>(HidlDataspace::STANDARD_ADOBE_RGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_SHIFT) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_SHIFT));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_MASK) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_MASK));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_UNSPECIFIED) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_UNSPECIFIED));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_SRGB) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_SRGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_SMPTE_170M) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_SMPTE_170M));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_GAMMA2_2) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_GAMMA2_2));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_GAMMA2_6) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_GAMMA2_6));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_GAMMA2_8) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_GAMMA2_8));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_ST2084) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_ST2084));
+static_assert(static_cast<uint32_t>(AidlDataspace::TRANSFER_HLG) ==
+ static_cast<uint32_t>(HidlDataspace::TRANSFER_HLG));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_SHIFT) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_SHIFT));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_MASK) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_MASK));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_UNSPECIFIED) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_UNSPECIFIED));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_FULL) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_FULL));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_LIMITED) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_LIMITED));
+static_assert(static_cast<uint32_t>(AidlDataspace::RANGE_EXTENDED) ==
+ static_cast<uint32_t>(HidlDataspace::RANGE_EXTENDED));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_SRGB_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::V0_SRGB_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_SCRGB_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::V0_SCRGB_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_SRGB) ==
+ static_cast<uint32_t>(HidlDataspace::V0_SRGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_SCRGB) ==
+ static_cast<uint32_t>(HidlDataspace::V0_SCRGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_JFIF) ==
+ static_cast<uint32_t>(HidlDataspace::V0_JFIF));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_BT601_625) ==
+ static_cast<uint32_t>(HidlDataspace::V0_BT601_625));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_BT601_525) ==
+ static_cast<uint32_t>(HidlDataspace::V0_BT601_525));
+static_assert(static_cast<uint32_t>(AidlDataspace::V0_BT709) ==
+ static_cast<uint32_t>(HidlDataspace::V0_BT709));
+static_assert(static_cast<uint32_t>(AidlDataspace::DCI_P3_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::DCI_P3_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::DCI_P3) ==
+ static_cast<uint32_t>(HidlDataspace::DCI_P3));
+static_assert(static_cast<uint32_t>(AidlDataspace::DISPLAY_P3_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::DISPLAY_P3_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::DISPLAY_P3) ==
+ static_cast<uint32_t>(HidlDataspace::DISPLAY_P3));
+static_assert(static_cast<uint32_t>(AidlDataspace::ADOBE_RGB) ==
+ static_cast<uint32_t>(HidlDataspace::ADOBE_RGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_LINEAR) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_PQ) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_PQ));
+static_assert(static_cast<uint32_t>(AidlDataspace::DEPTH) ==
+ static_cast<uint32_t>(HidlDataspace::DEPTH));
+static_assert(static_cast<uint32_t>(AidlDataspace::SENSOR) ==
+ static_cast<uint32_t>(HidlDataspace::SENSOR));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_ITU) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_ITU));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_ITU_PQ) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_ITU_PQ));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_ITU_HLG) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_ITU_HLG));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT2020_HLG) ==
+ static_cast<uint32_t>(HidlDataspace::BT2020_HLG));
+static_assert(static_cast<uint32_t>(AidlDataspace::DISPLAY_BT2020) ==
+ static_cast<uint32_t>(HidlDataspace::DISPLAY_BT2020));
+static_assert(static_cast<uint32_t>(AidlDataspace::DYNAMIC_DEPTH) ==
+ static_cast<uint32_t>(HidlDataspace::DYNAMIC_DEPTH));
+static_assert(static_cast<uint32_t>(AidlDataspace::JPEG_APP_SEGMENTS) ==
+ static_cast<uint32_t>(HidlDataspace::JPEG_APP_SEGMENTS));
+static_assert(static_cast<uint32_t>(AidlDataspace::HEIF) ==
+ static_cast<uint32_t>(HidlDataspace::HEIF));
+
+// Below are the dataspaces that have been deprecated for some time. They are required to behave
+// the same as their V0_* counterparts, so we redefined them in AIDL to match their V0_*
+// counterparts.
+static_assert(static_cast<uint32_t>(AidlDataspace::SRGB_LINEAR) ==
+ static_cast<uint32_t>(AidlDataspace::V0_SRGB_LINEAR));
+static_assert(static_cast<uint32_t>(AidlDataspace::SRGB) ==
+ static_cast<uint32_t>(AidlDataspace::V0_SRGB));
+static_assert(static_cast<uint32_t>(AidlDataspace::JFIF) ==
+ static_cast<uint32_t>(AidlDataspace::V0_JFIF));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT601_625) ==
+ static_cast<uint32_t>(AidlDataspace::V0_BT601_625));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT601_525) ==
+ static_cast<uint32_t>(AidlDataspace::V0_BT601_525));
+static_assert(static_cast<uint32_t>(AidlDataspace::BT709) ==
+ static_cast<uint32_t>(AidlDataspace::V0_BT709));
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index 78114a1..0c4a752 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -55,6 +55,8 @@
"TransactionApplicationTest.cpp",
"StrongTypingTest.cpp",
"VSyncDispatchTimerQueueTest.cpp",
+ "VSyncDispatchRealtimeTest.cpp",
+ "VSyncPredictorTest.cpp",
"mock/DisplayHardware/MockComposer.cpp",
"mock/DisplayHardware/MockDisplay.cpp",
"mock/DisplayHardware/MockPowerAdvisor.cpp",
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index b1a4951..db7d04c 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -469,6 +469,10 @@
getDisplayAttribute(HWC_DISPLAY_ID, HWC_ACTIVE_CONFIG_ID,
IComposerClient::Attribute::DPI_Y, _))
.WillOnce(DoAll(SetArgPointee<3>(DEFAULT_DPI), Return(Error::NONE)));
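+ // The mock reports -1 for the CONFIG_GROUP attribute of the active config.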
+ EXPECT_CALL(*test->mComposer,
+ getDisplayAttribute(HWC_DISPLAY_ID, HWC_ACTIVE_CONFIG_ID,
+ IComposerClient::Attribute::CONFIG_GROUP, _))
+ .WillOnce(DoAll(SetArgPointee<3>(-1), Return(Error::NONE)));
if (PhysicalDisplay::HAS_IDENTIFICATION_DATA) {
EXPECT_CALL(*test->mComposer, getDisplayIdentificationData(HWC_DISPLAY_ID, _, _))
diff --git a/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
index b5af591..c334bcf 100644
--- a/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameTracerTest.cpp
@@ -82,8 +82,8 @@
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
- mFrameTracer->traceNewLayer(layerID, layerName);
+ const int32_t layerId = 5;
+ mFrameTracer->traceNewLayer(layerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
@@ -92,7 +92,7 @@
tracingSession->StartBlocking();
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
tracingSession->StopBlocking();
@@ -103,31 +103,31 @@
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
- const int32_t secondLayerID = 6;
+ const int32_t layerId = 5;
+ const int32_t secondLayerId = 6;
auto tracingSession = getTracingSessionForTest();
tracingSession->StartBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceNewLayer(secondLayerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceNewLayer(secondLayerId, layerName);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 2\n");
tracingSession->StopBlocking();
- mFrameTracer->onDestroy(layerID);
+ mFrameTracer->onDestroy(layerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
- mFrameTracer->onDestroy(layerID);
+ mFrameTracer->onDestroy(layerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 1\n");
- mFrameTracer->onDestroy(secondLayerID);
+ mFrameTracer->onDestroy(secondLayerId);
EXPECT_EQ(mFrameTracer->miniDump(),
"FrameTracer miniDump:\nNumber of layers currently being traced is 0\n");
}
TEST_F(FrameTracerTest, canTraceAfterAddingLayer) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 1;
+ const int32_t layerId = 1;
const uint32_t bufferID = 2;
const uint64_t frameNumber = 3;
const nsecs_t timestamp = 4;
@@ -141,9 +141,9 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceTimestamp(layerID, bufferID, frameNumber, timestamp, type, duration);
+ mFrameTracer->traceTimestamp(layerId, bufferID, frameNumber, timestamp, type, duration);
// Create second trace packet to finalize the previous one.
- mFrameTracer->traceTimestamp(layerID, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -157,10 +157,10 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceTimestamp(layerID, bufferID, frameNumber, timestamp, type, duration);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceTimestamp(layerId, bufferID, frameNumber, timestamp, type, duration);
// Create second trace packet to finalize the previous one.
- mFrameTracer->traceTimestamp(layerID, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, 0, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -191,7 +191,7 @@
TEST_F(FrameTracerTest, traceFenceTriggersOnNextTraceAfterFenceFired) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -204,10 +204,10 @@
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
// Trace.
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fenceTime, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fenceTime, type);
// Create extra trace packet to (hopefully not) trigger and finalize the fence packet.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
EXPECT_EQ(raw_trace.size(), 0);
@@ -219,12 +219,12 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fenceTime, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fenceTime, type);
const nsecs_t timestamp = systemTime();
fenceFactory.signalAllForTest(Fence::NO_FENCE, timestamp);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -254,7 +254,7 @@
TEST_F(FrameTracerTest, traceFenceWithStartTimeAfterSignalTime_ShouldHaveNoDuration) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -264,24 +264,24 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
// traceFence called after fence signalled.
const nsecs_t signalTime1 = systemTime();
const nsecs_t startTime1 = signalTime1 + 100000;
auto fence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime1);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence1, type, startTime1);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence1, type, startTime1);
// traceFence called before fence signalled.
const nsecs_t signalTime2 = systemTime();
const nsecs_t startTime2 = signalTime2 + 100000;
auto fence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence2, type, startTime2);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence2, type, startTime2);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime2);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -309,7 +309,7 @@
TEST_F(FrameTracerTest, traceFenceOlderThanDeadline_ShouldBeIgnored) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -321,11 +321,11 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence, type);
+ mFrameTracer->traceNewLayer(layerId, layerName);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence, type);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime);
// Create extra trace packet to trigger and finalize any previous fence packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
@@ -334,7 +334,7 @@
TEST_F(FrameTracerTest, traceFenceWithValidStartTime_ShouldHaveCorrectDuration) {
const std::string layerName = "co.layername#0";
- const int32_t layerID = 5;
+ const int32_t layerId = 5;
const uint32_t bufferID = 4;
const uint64_t frameNumber = 3;
const auto type = FrameTracer::FrameEvent::ACQUIRE_FENCE;
@@ -345,24 +345,24 @@
tracingSession->StartBlocking();
// Clean up irrelevant traces.
tracingSession->ReadTraceBlocking();
- mFrameTracer->traceNewLayer(layerID, layerName);
+ mFrameTracer->traceNewLayer(layerId, layerName);
// traceFence called after fence signalled.
const nsecs_t signalTime1 = systemTime();
const nsecs_t startTime1 = signalTime1 - duration;
auto fence1 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime1);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence1, type, startTime1);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence1, type, startTime1);
// traceFence called before fence signalled.
const nsecs_t signalTime2 = systemTime();
const nsecs_t startTime2 = signalTime2 - duration;
auto fence2 = fenceFactory.createFenceTimeForTest(Fence::NO_FENCE);
- mFrameTracer->traceFence(layerID, bufferID, frameNumber, fence2, type, startTime2);
+ mFrameTracer->traceFence(layerId, bufferID, frameNumber, fence2, type, startTime2);
fenceFactory.signalAllForTest(Fence::NO_FENCE, signalTime2);
// Create extra trace packet to trigger and finalize fence trace packets.
- mFrameTracer->traceTimestamp(layerID, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
+ mFrameTracer->traceTimestamp(layerId, bufferID, 0, 0, FrameTracer::FrameEvent::UNSPECIFIED);
tracingSession->StopBlocking();
std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
diff --git a/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp b/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
index 4eb9ec3..069344a 100644
--- a/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TimeStatsTest.cpp
@@ -20,11 +20,11 @@
#include <TimeStats/TimeStats.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-
#include <log/log.h>
#include <utils/String16.h>
#include <utils/Vector.h>
+#include <chrono>
#include <random>
#include <unordered_set>
@@ -170,8 +170,8 @@
return result;
}
-static std::string genLayerName(int32_t layerID) {
- return (layerID < 0 ? "PopupWindow:b54fcd1#0" : "com.dummy#") + std::to_string(layerID);
+static std::string genLayerName(int32_t layerId) {
+ return (layerId < 0 ? "PopupWindow:b54fcd1#0" : "com.dummy#") + std::to_string(layerId);
}
void TimeStatsTest::setTimeStamp(TimeStamp type, int32_t id, uint64_t frameNumber, nsecs_t ts) {
@@ -278,6 +278,31 @@
EXPECT_EQ(2, histogramProto.time_millis());
}
+TEST_F(TimeStatsTest, canInsertGlobalFrameDuration) {
+ EXPECT_TRUE(inputCommand(InputCommand::ENABLE, FMT_STRING).empty());
+
+ using namespace std::chrono_literals;
+
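+ // Only the 3ms frame recorded in HWC_POWER_MODE_NORMAL should be counted; the frame recorded
+ // while the power mode is OFF is expected to be ignored.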
+ mTimeStats->setPowerMode(HWC_POWER_MODE_OFF);
+ mTimeStats
+ ->recordFrameDuration(std::chrono::duration_cast<std::chrono::nanoseconds>(1ms).count(),
+ std::chrono::duration_cast<std::chrono::nanoseconds>(5ms)
+ .count());
+ mTimeStats->setPowerMode(HWC_POWER_MODE_NORMAL);
+ mTimeStats
+ ->recordFrameDuration(std::chrono::duration_cast<std::chrono::nanoseconds>(3ms).count(),
+ std::chrono::duration_cast<std::chrono::nanoseconds>(6ms)
+ .count());
+
+ SFTimeStatsGlobalProto globalProto;
+ ASSERT_TRUE(globalProto.ParseFromString(inputCommand(InputCommand::DUMP_ALL, FMT_PROTO)));
+
+ ASSERT_EQ(1, globalProto.frame_duration_size());
+ const SFTimeStatsHistogramBucketProto& histogramProto = globalProto.frame_duration().Get(0);
+ EXPECT_EQ(1, histogramProto.frame_count());
+ EXPECT_EQ(3, histogramProto.time_millis());
+}
+
TEST_F(TimeStatsTest, canInsertOneLayerTimeStats) {
EXPECT_TRUE(inputCommand(InputCommand::ENABLE, FMT_STRING).empty());
@@ -560,22 +585,22 @@
EXPECT_TRUE(inputCommand(InputCommand::ENABLE, FMT_STRING).empty());
for (size_t i = 0; i < 10000000; ++i) {
- const int32_t layerID = genRandomInt32(-1, 10);
+ const int32_t layerId = genRandomInt32(-1, 10);
const int32_t frameNumber = genRandomInt32(1, 10);
switch (genRandomInt32(0, 100)) {
case 0:
ALOGV("removeTimeRecord");
- ASSERT_NO_FATAL_FAILURE(mTimeStats->removeTimeRecord(layerID, frameNumber));
+ ASSERT_NO_FATAL_FAILURE(mTimeStats->removeTimeRecord(layerId, frameNumber));
continue;
case 1:
ALOGV("onDestroy");
- ASSERT_NO_FATAL_FAILURE(mTimeStats->onDestroy(layerID));
+ ASSERT_NO_FATAL_FAILURE(mTimeStats->onDestroy(layerId));
continue;
}
TimeStamp type = static_cast<TimeStamp>(genRandomInt32(TIME_STAMP_BEGIN, TIME_STAMP_END));
const int32_t ts = genRandomInt32(1, 1000000000);
- ALOGV("type[%d], layerID[%d], frameNumber[%d], ts[%d]", type, layerID, frameNumber, ts);
- setTimeStamp(type, layerID, frameNumber, ts);
+ ALOGV("type[%d], layerId[%d], frameNumber[%d], ts[%d]", type, layerId, frameNumber, ts);
+ setTimeStamp(type, layerId, frameNumber, ts);
}
}
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
new file mode 100644
index 0000000..c012616
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Scheduler/TimeKeeper.h"
+#include "Scheduler/Timer.h"
+#include "Scheduler/VSyncDispatchTimerQueue.h"
+#include "Scheduler/VSyncTracker.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <thread>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+template <typename Rep, typename Per>
+constexpr nsecs_t toNs(std::chrono::duration<Rep, Per> const& tp) {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(tp).count();
+}
+
+class FixedRateIdealStubTracker : public VSyncTracker {
+public:
+ FixedRateIdealStubTracker() : mPeriod{toNs(3ms)} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
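+ // Ideal fixed-rate tracker: rounds any time point up to the next multiple of the 3ms period.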
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t timePoint) const final {
+ auto const floor = timePoint % mPeriod;
+ if (floor == 0) {
+ return timePoint;
+ }
+ return timePoint - floor + mPeriod;
+ }
+
+private:
+ nsecs_t const mPeriod;
+};
+
+class VRRStubTracker : public VSyncTracker {
+public:
+ VRRStubTracker(nsecs_t period) : mPeriod{period} {}
+
+ void addVsyncTimestamp(nsecs_t) final {}
+
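+ // Predicts the next vsync by rounding up from the last-known base timestamp using the current
+ // interval, which tests can change via set_interval().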
+ nsecs_t nextAnticipatedVSyncTimeFrom(nsecs_t time_point) const final {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ auto const normalized_to_base = time_point - mBase;
+ auto const floor = (normalized_to_base) % mPeriod;
+ if (floor == 0) {
+ return time_point;
+ }
+ return normalized_to_base - floor + mPeriod + mBase;
+ }
+
+ void set_interval(nsecs_t interval, nsecs_t last_known) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mPeriod = interval;
+ mBase = last_known;
+ }
+
+private:
+ std::mutex mutable mMutex;
+ nsecs_t mPeriod;
+ nsecs_t mBase = 0;
+};
+
+struct VSyncDispatchRealtimeTest : testing::Test {
+ static nsecs_t constexpr mDispatchGroupThreshold = toNs(100us);
+ static size_t constexpr mIterations = 20;
+};
+
+class RepeatingCallbackReceiver {
+public:
+ RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t wl)
+ : mWorkload(wl),
+ mCallback(
+ dispatch, [&](auto time) { callback_called(time); }, "repeat0") {}
+
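+ // Schedules the callback `iterations` times, re-arming from each callback's reported time and
+ // invoking onEachFrame between wakeups.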
+ void repeatedly_schedule(size_t iterations, std::function<void(nsecs_t)> const& onEachFrame) {
+ mCallbackTimes.reserve(iterations);
+ mCallback.schedule(mWorkload, systemTime(SYSTEM_TIME_MONOTONIC) + mWorkload);
+
+ for (auto i = 0u; i < iterations - 1; i++) {
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ mCalled = false;
+ auto last = mLastTarget;
+ lk.unlock();
+
+ onEachFrame(last);
+
+ mCallback.schedule(mWorkload, last + mWorkload);
+ }
+
+ // wait for the last callback.
+ std::unique_lock<decltype(mMutex)> lk(mMutex);
+ mCv.wait(lk, [&] { return mCalled; });
+ }
+
+ void with_callback_times(std::function<void(std::vector<nsecs_t> const&)> const& fn) const {
+ fn(mCallbackTimes);
+ }
+
+private:
+ void callback_called(nsecs_t time) {
+ std::lock_guard<decltype(mMutex)> lk(mMutex);
+ mCallbackTimes.push_back(time);
+ mCalled = true;
+ mLastTarget = time;
+ mCv.notify_all();
+ }
+
+ nsecs_t const mWorkload;
+ VSyncCallbackRegistration mCallback;
+
+ std::mutex mMutex;
+ std::condition_variable mCv;
+ bool mCalled = false;
+ nsecs_t mLastTarget = 0;
+ std::vector<nsecs_t> mCallbackTimes;
+};
+
+TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
+ FixedRateIdealStubTracker tracker;
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ static size_t constexpr num_clients = 3;
+ std::array<RepeatingCallbackReceiver, num_clients>
+ cb_receiver{RepeatingCallbackReceiver(dispatch, toNs(1500us)),
+ RepeatingCallbackReceiver(dispatch, toNs(0h)),
+ RepeatingCallbackReceiver(dispatch, toNs(1ms))};
+
+ auto const on_each_frame = [](nsecs_t) {};
+ std::array<std::thread, num_clients> threads{
+ std::thread([&] { cb_receiver[0].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[1].repeatedly_schedule(mIterations, on_each_frame); }),
+ std::thread([&] { cb_receiver[2].repeatedly_schedule(mIterations, on_each_frame); }),
+ };
+
+ for (auto it = threads.rbegin(); it != threads.rend(); it++) {
+ it->join();
+ }
+
+ for (auto const& cbs : cb_receiver) {
+ cbs.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+ }
+}
+
+// starts at 333hz, slides down to 43hz
+TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
+ auto next_vsync_interval = toNs(3ms);
+ VRRStubTracker tracker(next_vsync_interval);
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
+ };
+
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+
+// starts at 333hz, jumps to 200hz at frame 10
+TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
+ VRRStubTracker tracker(toNs(3ms));
+ VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold);
+
+ RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms));
+
+ auto jump_frame_counter = 0u;
+ auto constexpr jump_frame_at = 10u;
+ auto const on_each_frame = [&](nsecs_t last_known) {
+ if (jump_frame_counter++ == jump_frame_at) {
+ tracker.set_interval(toNs(5ms), last_known);
+ }
+ };
+ std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
+ eventThread.join();
+
+ cb_receiver.with_callback_times([](auto times) { EXPECT_THAT(times.size(), Eq(mIterations)); });
+}
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
new file mode 100644
index 0000000..d0c8090
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LibSurfaceFlingerUnittests"
+#define LOG_NDEBUG 0
+
+#include "Scheduler/VSyncPredictor.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <chrono>
+#include <utility>
+
+using namespace testing;
+using namespace std::literals;
+
+namespace android::scheduler {
+
+MATCHER_P2(IsCloseTo, value, tolerance, "is within tolerance") {
+ return arg <= value + tolerance && arg >= value - tolerance;
+}
+
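+// Builds `count` ideal vsync timestamps spaced exactly `period` apart, starting at `bias`.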
+std::vector<nsecs_t> generateVsyncTimestamps(size_t count, nsecs_t period, nsecs_t bias) {
+ std::vector<nsecs_t> vsyncs(count);
+ std::generate(vsyncs.begin(), vsyncs.end(),
+ [&, n = 0]() mutable { return n++ * period + bias; });
+ return vsyncs;
+}
+
+struct VSyncPredictorTest : testing::Test {
+ nsecs_t mNow = 0;
+ nsecs_t mPeriod = 1000;
+ static constexpr size_t kHistorySize = 10;
+ static constexpr size_t kMinimumSamplesForPrediction = 6;
+ static constexpr size_t kOutlierTolerancePercent = 25;
+ static constexpr nsecs_t mMaxRoundingError = 100;
+
+ VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+ kOutlierTolerancePercent};
+};
+
+TEST_F(VSyncPredictorTest, reportsAnticipatedPeriod) {
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+
+ EXPECT_THAT(slope, Eq(mPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ auto const changedPeriod = 2000;
+ tracker.setPeriod(changedPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(changedPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededWhenHasNoDataPoints) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += mPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, reportsSamplesNeededAfterExplicitRateChange) {
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += mPeriod);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ auto const changedPeriod = mPeriod * 2;
+ tracker.setPeriod(changedPeriod);
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow));
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow += changedPeriod));
+ tracker.addVsyncTimestamp(mNow);
+ }
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+}
+
+TEST_F(VSyncPredictorTest, transitionsToModelledPointsAfterSynthetic) {
+ auto last = mNow;
+ auto const bias = 10;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(last + mPeriod));
+ mNow += mPeriod - bias;
+ last = mNow;
+ tracker.addVsyncTimestamp(mNow);
+ mNow += bias;
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 100), Eq(mNow + mPeriod - bias));
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow + 990), Eq(mNow + 2 * mPeriod - bias));
+}
+
+TEST_F(VSyncPredictorTest, uponNotifiedOfInaccuracyUsesSynthetic) {
+ auto const slightlyLessPeriod = mPeriod - 10;
+ auto const changedPeriod = mPeriod - 1;
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(mNow += slightlyLessPeriod);
+ }
+
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + slightlyLessPeriod));
+ tracker.setPeriod(changedPeriod);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mNow + changedPeriod));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_60hzHighVariance) {
+ // these are precomputed simulated 16.6ms vsyncs with uniform distribution +/- 1.6ms error
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 15492949, 32325658, 49534984, 67496129, 84652891,
+ 100332564, 117737004, 132125931, 149291099, 165199602,
+ };
+ auto constexpr idealPeriod = 16600000;
+ auto constexpr expectedPeriod = 16639242;
+ auto constexpr expectedIntercept = 1049341;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelines_90hzLowVariance) {
+ // these are precomputed simulated 11.1ms vsyncs with uniform distribution +/- 1ms error
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 11167047, 22603464, 32538479, 44938134, 56321268,
+ 66730346, 78062637, 88171429, 99707843, 111397621,
+ };
+ auto idealPeriod = 11110000;
+ auto expectedPeriod = 11089413;
+ auto expectedIntercept = 94421;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, adaptsToFenceTimelinesDiscontinuous_22hzLowVariance) {
+ // these are 45.45ms vsyncs with low variance, randomly computed, between -1 and 1ms
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 45259463, // 0
+ 91511026, // 1
+ 136307650, // 2
+ 1864501714, // 40
+ 1908641034, // 41
+ 1955278544, // 42
+ 4590180096, // 100
+ 4681594994, // 102
+ 5499224734, // 120
+ 5591378272, // 122
+ };
+ auto idealPeriod = 45454545;
+ auto expectedPeriod = 45450152;
+ auto expectedIntercept = 469647;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, againstOutliersDiscontinuous_500hzLowVariance) {
+ std::vector<nsecs_t> const simulatedVsyncs{
+ 1992548, // 0
+ 4078038, // 1
+ 6165794, // 2
+ 7958171, // 3
+ 10193537, // 4
+ 2401840200, // 1200
+ 2403000000, // an outlier that should be excluded (1201 and a half)
+ 2405803629, // 1202
+ 2408028599, // 1203
+ 2410121051, // 1204
+ };
+ auto idealPeriod = 2000000;
+ auto expectedPeriod = 1999892;
+ auto expectedIntercept = 175409;
+
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(expectedPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(expectedIntercept, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, handlesVsyncChange) {
+ auto const fastPeriod = 100;
+ auto const fastTimeBase = 100;
+ auto const slowPeriod = 400;
+ auto const slowTimeBase = 800;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+
+ tracker.setPeriod(fastPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ auto const mMaxRoundingError = 100;
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(fastPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, IsCloseTo(slowPeriod, mMaxRoundingError));
+ EXPECT_THAT(intercept, IsCloseTo(0, mMaxRoundingError));
+}
+
+TEST_F(VSyncPredictorTest, willBeAccurateUsingPriorResultsForRate) {
+ auto const fastPeriod = 101000;
+ auto const fastTimeBase = fastPeriod - 500;
+ auto const fastPeriod2 = 99000;
+
+ auto const slowPeriod = 400000;
+ auto const slowTimeBase = 800000 - 201;
+ auto const simulatedVsyncsFast =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod, fastTimeBase);
+ auto const simulatedVsyncsSlow =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, slowPeriod, slowTimeBase);
+ auto const simulatedVsyncsFast2 =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction, fastPeriod2, fastTimeBase);
+
+ auto idealPeriod = 100000;
+ tracker.setPeriod(idealPeriod);
+ for (auto const& timestamp : simulatedVsyncsFast) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto [slope, intercept] = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ tracker.setPeriod(slowPeriod);
+ for (auto const& timestamp : simulatedVsyncsSlow) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+
+ // we had a model for the 100000ns idealPeriod before; use that until the new samples are
+ // sufficiently built up
+ tracker.setPeriod(idealPeriod);
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod));
+ EXPECT_THAT(intercept, Eq(0));
+
+ for (auto const& timestamp : simulatedVsyncsFast2) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ std::tie(slope, intercept) = tracker.getVSyncPredictionModel();
+ EXPECT_THAT(slope, Eq(fastPeriod2));
+ EXPECT_THAT(intercept, Eq(0));
+}
+
+TEST_F(VSyncPredictorTest, willBecomeInaccurateAfterA_longTimeWithNoSamples) {
+ auto const simulatedVsyncs = generateVsyncTimestamps(kMinimumSamplesForPrediction, mPeriod, 0);
+
+ for (auto const& timestamp : simulatedVsyncs) {
+ tracker.addVsyncTimestamp(timestamp);
+ }
+ auto const mNow = *simulatedVsyncs.rbegin();
+ EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+
+ // TODO: would be better to decay this as a result of the variance of the samples
+ static auto constexpr aLongTimeOut = 1000000000;
+ EXPECT_TRUE(tracker.needsMoreSamples(mNow + aLongTimeOut));
+}
+
+TEST_F(VSyncPredictorTest, idealModelPredictionsBeforeRegressionModelIsBuilt) {
+ auto const simulatedVsyncs =
+ generateVsyncTimestamps(kMinimumSamplesForPrediction + 1, mPeriod, 0);
+ nsecs_t const mNow = 0;
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(mNow), Eq(mPeriod));
+
+ nsecs_t const aBitOfTime = 422;
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+
+ for (auto i = kMinimumSamplesForPrediction; i < simulatedVsyncs.size(); i++) {
+ tracker.addVsyncTimestamp(simulatedVsyncs[i]);
+ EXPECT_THAT(tracker.nextAnticipatedVSyncTimeFrom(simulatedVsyncs[i] + aBitOfTime),
+ Eq(mPeriod + simulatedVsyncs[i]));
+ }
+}
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockComposer.h b/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockComposer.h
index 98c6aa0..2453ccb 100644
--- a/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockComposer.h
+++ b/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockComposer.h
@@ -39,8 +39,8 @@
using android::hardware::graphics::composer::V2_1::Display;
using android::hardware::graphics::composer::V2_1::Error;
using android::hardware::graphics::composer::V2_1::IComposer;
-using android::hardware::graphics::composer::V2_1::IComposerCallback;
using android::hardware::graphics::composer::V2_1::Layer;
+using android::hardware::graphics::composer::V2_4::IComposerCallback;
using android::hardware::graphics::composer::V2_4::IComposerClient;
class Composer : public Hwc2::Composer {
@@ -120,8 +120,16 @@
MOCK_METHOD3(setLayerPerFrameMetadataBlobs,
Error(Display, Layer, const std::vector<IComposerClient::PerFrameMetadataBlob>&));
MOCK_METHOD2(setDisplayBrightness, Error(Display, float));
+ MOCK_METHOD0(isVsyncPeriodSwitchSupported, bool());
MOCK_METHOD2(getDisplayCapabilities, Error(Display, std::vector<DisplayCapability>*));
- MOCK_METHOD2(getDisplayConnectionType, Error(Display, IComposerClient::DisplayConnectionType*));
+ MOCK_METHOD2(getDisplayConnectionType,
+ V2_4::Error(Display, IComposerClient::DisplayConnectionType*));
+ MOCK_METHOD3(getSupportedDisplayVsyncPeriods,
+ V2_4::Error(Display, Config, std::vector<VsyncPeriodNanos>*));
+ MOCK_METHOD2(getDisplayVsyncPeriod, V2_4::Error(Display, VsyncPeriodNanos*));
+ MOCK_METHOD4(setActiveConfigWithConstraints,
+ V2_4::Error(Display, Config, const IComposerClient::VsyncPeriodChangeConstraints&,
+ VsyncPeriodChangeTimeline*));
};
} // namespace mock
diff --git a/services/surfaceflinger/tests/unittests/mock/MockTimeStats.h b/services/surfaceflinger/tests/unittests/mock/MockTimeStats.h
index b1634a8..e94af49 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockTimeStats.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockTimeStats.h
@@ -34,6 +34,7 @@
MOCK_METHOD0(incrementTotalFrames, void());
MOCK_METHOD0(incrementMissedFrames, void());
MOCK_METHOD0(incrementClientCompositionFrames, void());
+ MOCK_METHOD2(recordFrameDuration, void(nsecs_t, nsecs_t));
MOCK_METHOD4(setPostTime, void(int32_t, uint64_t, const std::string&, nsecs_t));
MOCK_METHOD3(setLatchTime, void(int32_t, uint64_t, nsecs_t));
MOCK_METHOD3(setDesiredTime, void(int32_t, uint64_t, nsecs_t));