Merge "Add producerId so we know when the BBQ producer has been changed."
diff --git a/TEST_MAPPING b/TEST_MAPPING
index f54f132..cd8f3cd 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -4,58 +4,7 @@
"name": "SurfaceFlinger_test",
"options": [
{
- "include-filter": "*CredentialsTest.*"
- },
- {
- "include-filter": "*SurfaceFlingerStress.*"
- },
- {
- "include-filter": "*SurfaceInterceptorTest.*"
- },
- {
- "include-filter": "*LayerTransactionTest.*"
- },
- {
- "include-filter": "*LayerTypeTransactionTest.*"
- },
- {
- "include-filter": "*LayerUpdateTest.*"
- },
- {
- "include-filter": "*GeometryLatchingTest.*"
- },
- {
- "include-filter": "*CropLatchingTest.*"
- },
- {
- "include-filter": "*ChildLayerTest.*"
- },
- {
- "include-filter": "*ScreenCaptureTest.*"
- },
- {
- "include-filter": "*ScreenCaptureChildOnlyTest.*"
- },
- {
- "include-filter": "*DereferenceSurfaceControlTest.*"
- },
- {
- "include-filter": "*BoundlessLayerTest.*"
- },
- {
- "include-filter": "*MultiDisplayLayerBoundsTest.*"
- },
- {
- "include-filter": "*InvalidHandleTest.*"
- },
- {
- "include-filter": "*VirtualDisplayTest.*"
- },
- {
- "include-filter": "*RelativeZTest.*"
- },
- {
- "include-filter": "*RefreshRateOverlayTest.*"
+ "include-filter": "*"
},
{
"exclude-filter": "*ChildLayerTest#ChildrenSurviveParentDestruction"
@@ -76,58 +25,7 @@
"name": "SurfaceFlinger_test",
"options": [
{
- "include-filter": "*CredentialsTest.*"
- },
- {
- "include-filter": "*SurfaceFlingerStress.*"
- },
- {
- "include-filter": "*SurfaceInterceptorTest.*"
- },
- {
- "include-filter": "*LayerTransactionTest.*"
- },
- {
- "include-filter": "*LayerTypeTransactionTest.*"
- },
- {
- "include-filter": "*LayerUpdateTest.*"
- },
- {
- "include-filter": "*GeometryLatchingTest.*"
- },
- {
- "include-filter": "*CropLatchingTest.*"
- },
- {
- "include-filter": "*ChildLayerTest.*"
- },
- {
- "include-filter": "*ScreenCaptureTest.*"
- },
- {
- "include-filter": "*ScreenCaptureChildOnlyTest.*"
- },
- {
- "include-filter": "*DereferenceSurfaceControlTest.*"
- },
- {
- "include-filter": "*BoundlessLayerTest.*"
- },
- {
- "include-filter": "*MultiDisplayLayerBoundsTest.*"
- },
- {
- "include-filter": "*InvalidHandleTest.*"
- },
- {
- "include-filter": "*VirtualDisplayTest.*"
- },
- {
- "include-filter": "*RelativeZTest.*"
- },
- {
- "include-filter": "*RefreshRateOverlayTest.*"
+ "include-filter": "*"
}
]
}
diff --git a/include/private/surface_control_private.h b/include/private/surface_control_private.h
index 7e6c515..138926e 100644
--- a/include/private/surface_control_private.h
+++ b/include/private/surface_control_private.h
@@ -19,6 +19,8 @@
#include <stdint.h>
+#include <android/choreographer.h>
+
__BEGIN_DECLS
struct ASurfaceControl;
@@ -56,6 +58,13 @@
ASurfaceControl_SurfaceStatsListener func);
/**
+ * Gets the AChoreographer instance attached to the given \c surfaceControl. If no choreographer
+ * is associated with the surface control, a new instance is created and associated with the
+ * current thread's Looper.
+ */
+AChoreographer* ASurfaceControl_getChoreographer(ASurfaceControl* surfaceControl);
+
+/**
* Returns the timestamp of when the buffer was acquired for a specific frame with frame number
* obtained from ASurfaceControlStats_getFrameNumber.
*/
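
For context, a minimal usage sketch of the new accessor (assumes a valid ASurfaceControl obtained elsewhere and a Looper on the calling thread; the callback and function names are illustrative):

// Sketch only: `sc` is assumed to be a valid ASurfaceControl* owned by the caller,
// and the calling thread is assumed to have a Looper, as the new API requires.
#include <android/choreographer.h>
#include <private/surface_control_private.h>

static void onFrame(int64_t frameTimeNanos, void* /*data*/) {
    // React to the vsync delivered via the surface control's choreographer.
    (void)frameTimeNanos;
}

static void scheduleFrameCallback(ASurfaceControl* sc) {
    AChoreographer* choreographer = ASurfaceControl_getChoreographer(sc);
    // Post a frame callback through the public NDK choreographer API.
    AChoreographer_postFrameCallback64(choreographer, onFrame, nullptr);
}
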
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 661f70c..b84a9d3 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -215,6 +215,8 @@
cc_defaults {
name: "trusty_mock_defaults",
+ vendor_available: true,
+ host_supported: true,
header_libs: [
"trusty_mock_headers",
diff --git a/libs/binder/ndk/include_platform/android/binder_manager.h b/libs/binder/ndk/include_platform/android/binder_manager.h
index ad4188f..86d5ed2 100644
--- a/libs/binder/ndk/include_platform/android/binder_manager.h
+++ b/libs/binder/ndk/include_platform/android/binder_manager.h
@@ -38,6 +38,22 @@
AIBinder* binder, const char* instance) __INTRODUCED_IN(29);
/**
+ * This registers the service with the default service manager under this instance name. This does
+ * not take ownership of binder.
+ *
+ * WARNING: when using this API across an APEX boundary, do not use with unstable
+ * AIDL services. TODO(b/139325195)
+ *
+ * \param binder object to register globally with the service manager.
+ * \param instance identifier of the service. This will be used to look up the service.
+ * \param allowIsolated whether the service can be accessed from isolated processes.
+ *
+ * \return EX_NONE on success.
+ */
+__attribute__((warn_unused_result)) binder_exception_t AServiceManager_addServiceWithAllowIsolated(
+ AIBinder* binder, const char* instance, bool allowIsolated) __INTRODUCED_IN(34);
+
+/**
* Gets a binder object with this specific instance name. Will return nullptr immediately if the
* service is not available This also implicitly calls AIBinder_incStrong (so the caller of this
* function is responsible for calling AIBinder_decStrong).
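
A hedged usage sketch of the new registration entry point. The interface descriptor and instance name are illustrative, and a real service would normally come from the generated AIDL NDK backend rather than a raw AIBinder_Class:

// Sketch only: minimal raw-NDK service registered so that isolated (sandboxed)
// processes may access it; descriptor and instance name are illustrative.
#include <android/binder_ibinder.h>
#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <android/binder_status.h>

static void* onCreate(void* /*args*/) { return nullptr; }
static void onDestroy(void* /*userData*/) {}
static binder_status_t onTransact(AIBinder* /*binder*/, transaction_code_t /*code*/,
                                  const AParcel* /*in*/, AParcel* /*out*/) {
    return STATUS_UNKNOWN_TRANSACTION;
}

int main() {
    AIBinder_Class* clazz =
            AIBinder_Class_define("android.example.IMyService", onCreate, onDestroy, onTransact);
    AIBinder* binder = AIBinder_new(clazz, nullptr);

    binder_exception_t ex = AServiceManager_addServiceWithAllowIsolated(
            binder, "android.example.IMyService/default", /*allowIsolated=*/true);
    if (ex != EX_NONE) return 1;

    ABinderProcess_joinThreadPool();
    AIBinder_decStrong(binder); // addService does not take ownership of the binder
    return 0;
}
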
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index 54e4628..5f2f617 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -163,6 +163,7 @@
LIBBINDER_NDK_PLATFORM {
global:
AParcel_getAllowFds;
+ AServiceManager_addServiceWithAllowIsolated;
extern "C++" {
AIBinder_fromPlatformBinder*;
AIBinder_toPlatformBinder*;
diff --git a/libs/binder/ndk/service_manager.cpp b/libs/binder/ndk/service_manager.cpp
index e107c83..2763ddb 100644
--- a/libs/binder/ndk/service_manager.cpp
+++ b/libs/binder/ndk/service_manager.cpp
@@ -41,6 +41,19 @@
status_t exception = sm->addService(String16(instance), binder->getBinder());
return PruneException(exception);
}
+
+binder_exception_t AServiceManager_addServiceWithAllowIsolated(AIBinder* binder,
+ const char* instance,
+ bool allowIsolated) {
+ if (binder == nullptr || instance == nullptr) {
+ return EX_ILLEGAL_ARGUMENT;
+ }
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ status_t exception = sm->addService(String16(instance), binder->getBinder(), allowIsolated);
+ return PruneException(exception);
+}
+
AIBinder* AServiceManager_checkService(const char* instance) {
if (instance == nullptr) {
return nullptr;
diff --git a/libs/binder/tests/Android.bp b/libs/binder/tests/Android.bp
index 7006f87..e609987 100644
--- a/libs/binder/tests/Android.bp
+++ b/libs/binder/tests/Android.bp
@@ -370,6 +370,31 @@
],
}
+cc_binary {
+ name: "binderRpcTest_on_trusty_mock",
+ defaults: [
+ "trusty_mock_defaults",
+ ],
+
+ srcs: [
+ "binderRpcUniversalTests.cpp",
+ "binderRpcTestCommon.cpp",
+ "binderRpcTestTrusty.cpp",
+ ],
+
+ shared_libs: [
+ "libbinder_on_trusty_mock",
+ "libbase",
+ "libutils",
+ "libcutils",
+ ],
+
+ static_libs: [
+ "binderRpcTestIface-cpp",
+ "libgtest",
+ ],
+}
+
cc_test {
name: "binderRpcTest",
defaults: [
@@ -382,6 +407,7 @@
required: [
"libbinder_on_trusty_mock",
"binderRpcTestService_on_trusty_mock",
+ "binderRpcTest_on_trusty_mock",
],
}
diff --git a/libs/binder/tests/binderRpcTestTrusty.cpp b/libs/binder/tests/binderRpcTestTrusty.cpp
new file mode 100644
index 0000000..b3bb5eb
--- /dev/null
+++ b/libs/binder/tests/binderRpcTestTrusty.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "binderRpcTest"
+
+#include <android-base/stringprintf.h>
+#include <binder/RpcTransportTipcTrusty.h>
+#include <trusty-gtest.h>
+#include <trusty_ipc.h>
+
+#include "binderRpcTestFixture.h"
+
+namespace android {
+
+// Destructors need to be defined, even if pure virtual
+ProcessSession::~ProcessSession() {}
+
+class TrustyProcessSession : public ProcessSession {
+public:
+ ~TrustyProcessSession() override {}
+
+ void setCustomExitStatusCheck(std::function<void(int wstatus)> /*f*/) override {
+ LOG_ALWAYS_FATAL("setCustomExitStatusCheck() not supported");
+ }
+
+ void terminate() override { LOG_ALWAYS_FATAL("terminate() not supported"); }
+};
+
+std::string BinderRpc::PrintParamInfo(const testing::TestParamInfo<ParamType>& info) {
+ auto [type, security, clientVersion, serverVersion, singleThreaded, noKernel] = info.param;
+ auto ret = PrintToString(type) + "_clientV" + std::to_string(clientVersion) + "_serverV" +
+ std::to_string(serverVersion);
+ if (singleThreaded) {
+ ret += "_single_threaded";
+ }
+ if (noKernel) {
+ ret += "_no_kernel";
+ }
+ return ret;
+}
+
+// Trusty has no separate server process to spawn here; instead, this connects to the
+// pre-registered test service over TIPC for each requested session.
+std::unique_ptr<ProcessSession> BinderRpc::createRpcTestSocketServerProcessEtc(
+ const BinderRpcOptions& options) {
+ LOG_ALWAYS_FATAL_IF(options.numIncomingConnections != 0,
+ "Non-zero incoming connections %zu on Trusty",
+ options.numIncomingConnections);
+
+ uint32_t clientVersion = std::get<2>(GetParam());
+ uint32_t serverVersion = std::get<3>(GetParam());
+
+ auto ret = std::make_unique<TrustyProcessSession>();
+
+ status_t status;
+ for (size_t i = 0; i < options.numSessions; i++) {
+ auto factory = android::RpcTransportCtxFactoryTipcTrusty::make();
+ auto session = android::RpcSession::make(std::move(factory));
+
+ EXPECT_TRUE(session->setProtocolVersion(clientVersion));
+ session->setMaxOutgoingThreads(options.numOutgoingConnections);
+ session->setFileDescriptorTransportMode(options.clientFileDescriptorTransportMode);
+
+ status = session->setupPreconnectedClient({}, [&]() {
+ auto port = trustyIpcPort(serverVersion);
+ int rc = connect(port.c_str(), IPC_CONNECT_WAIT_FOR_PORT);
+ LOG_ALWAYS_FATAL_IF(rc < 0, "Failed to connect to service: %d", rc);
+ return base::unique_fd(rc);
+ });
+ if (options.allowConnectFailure && status != OK) {
+ ret->sessions.clear();
+ break;
+ }
+ LOG_ALWAYS_FATAL_IF(status != OK, "Failed to connect to service: %s",
+ statusToString(status).c_str());
+ ret->sessions.push_back({session, session->getRootObject()});
+ }
+
+ return ret;
+}
+
+INSTANTIATE_TEST_CASE_P(Trusty, BinderRpc,
+ ::testing::Combine(::testing::Values(SocketType::TIPC),
+ ::testing::Values(RpcSecurity::RAW),
+ ::testing::ValuesIn(testVersions()),
+ ::testing::ValuesIn(testVersions()),
+ ::testing::Values(false), ::testing::Values(true)),
+ BinderRpc::PrintParamInfo);
+
+} // namespace android
+
+PORT_GTEST(BinderRpcTest, "com.android.trusty.binderRpcTest");
diff --git a/libs/binder/tests/binderRpcUniversalTests.cpp b/libs/binder/tests/binderRpcUniversalTests.cpp
index 2249e5c..11a22b0 100644
--- a/libs/binder/tests/binderRpcUniversalTests.cpp
+++ b/libs/binder/tests/binderRpcUniversalTests.cpp
@@ -386,11 +386,11 @@
EXPECT_EQ(b, weak.promote());
}
-#define expectSessions(expected, iface) \
+#define EXPECT_SESSIONS(expected, iface) \
do { \
int session; \
EXPECT_OK((iface)->getNumOpenSessions(&session)); \
- EXPECT_EQ(expected, session); \
+ EXPECT_EQ(static_cast<int>(expected), session); \
} while (false)
TEST_P(BinderRpc, SingleSession) {
@@ -402,9 +402,9 @@
EXPECT_OK(session->getName(&out));
EXPECT_EQ("aoeu", out);
- expectSessions(1, proc.rootIface);
+ EXPECT_SESSIONS(1, proc.rootIface);
session = nullptr;
- expectSessions(0, proc.rootIface);
+ EXPECT_SESSIONS(0, proc.rootIface);
}
TEST_P(BinderRpc, ManySessions) {
@@ -413,24 +413,24 @@
std::vector<sp<IBinderRpcSession>> sessions;
for (size_t i = 0; i < 15; i++) {
- expectSessions(i, proc.rootIface);
+ EXPECT_SESSIONS(i, proc.rootIface);
sp<IBinderRpcSession> session;
EXPECT_OK(proc.rootIface->openSession(std::to_string(i), &session));
sessions.push_back(session);
}
- expectSessions(sessions.size(), proc.rootIface);
+ EXPECT_SESSIONS(sessions.size(), proc.rootIface);
for (size_t i = 0; i < sessions.size(); i++) {
std::string out;
EXPECT_OK(sessions.at(i)->getName(&out));
EXPECT_EQ(std::to_string(i), out);
}
- expectSessions(sessions.size(), proc.rootIface);
+ EXPECT_SESSIONS(sessions.size(), proc.rootIface);
while (!sessions.empty()) {
sessions.pop_back();
- expectSessions(sessions.size(), proc.rootIface);
+ EXPECT_SESSIONS(sessions.size(), proc.rootIface);
}
- expectSessions(0, proc.rootIface);
+ EXPECT_SESSIONS(0, proc.rootIface);
}
TEST_P(BinderRpc, OnewayCallDoesNotWait) {
@@ -483,7 +483,7 @@
cb->mCv.wait_for(_l, 1s, [&] { return !cb->mValues.empty(); });
}
- EXPECT_EQ(cb->mValues.size(), 1)
+ EXPECT_EQ(cb->mValues.size(), 1UL)
<< "callIsOneway: " << callIsOneway
<< " callbackIsOneway: " << callbackIsOneway << " delayed: " << delayed;
if (cb->mValues.empty()) continue;
diff --git a/libs/binder/trusty/binderRpcTest/manifest.json b/libs/binder/trusty/binderRpcTest/manifest.json
new file mode 100644
index 0000000..d8b080f
--- /dev/null
+++ b/libs/binder/trusty/binderRpcTest/manifest.json
@@ -0,0 +1,6 @@
+{
+ "uuid": "9dbe9fb8-60fd-4bdd-af86-03e95d7ad78b",
+ "app_name": "binderRpcTest",
+ "min_heap": 163840,
+ "min_stack": 16384
+}
diff --git a/libs/binder/trusty/binderRpcTest/rules.mk b/libs/binder/trusty/binderRpcTest/rules.mk
new file mode 100644
index 0000000..ae39492
--- /dev/null
+++ b/libs/binder/trusty/binderRpcTest/rules.mk
@@ -0,0 +1,35 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_DIR := $(GET_LOCAL_DIR)
+LIBBINDER_TESTS_DIR := frameworks/native/libs/binder/tests
+
+MODULE := $(LOCAL_DIR)
+
+MANIFEST := $(LOCAL_DIR)/manifest.json
+
+MODULE_SRCS += \
+ $(LIBBINDER_TESTS_DIR)/binderRpcUniversalTests.cpp \
+ $(LIBBINDER_TESTS_DIR)/binderRpcTestCommon.cpp \
+ $(LIBBINDER_TESTS_DIR)/binderRpcTestTrusty.cpp \
+
+MODULE_LIBRARY_DEPS += \
+ $(LOCAL_DIR)/aidl \
+ frameworks/native/libs/binder/trusty \
+ frameworks/native/libs/binder/trusty/ndk \
+ trusty/user/base/lib/googletest \
+ trusty/user/base/lib/libstdc++-trusty \
+
+include make/trusted_app.mk
diff --git a/libs/binder/trusty/build-config-usertests b/libs/binder/trusty/build-config-usertests
new file mode 100644
index 0000000..d0a1fbc
--- /dev/null
+++ b/libs/binder/trusty/build-config-usertests
@@ -0,0 +1,19 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file lists userspace tests
+
+[
+ porttest("com.android.trusty.binderRpcTest"),
+]
diff --git a/libs/binder/trusty/include_mock/trusty-gtest.h b/libs/binder/trusty/include_mock/trusty-gtest.h
new file mode 100644
index 0000000..046b403
--- /dev/null
+++ b/libs/binder/trusty/include_mock/trusty-gtest.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#define PORT_GTEST(suite, port) \
+ int main(void) { \
+ return 0; \
+ }
diff --git a/libs/binder/trusty/include_mock/trusty_ipc.h b/libs/binder/trusty/include_mock/trusty_ipc.h
index 43ab84a..db044c2 100644
--- a/libs/binder/trusty/include_mock/trusty_ipc.h
+++ b/libs/binder/trusty/include_mock/trusty_ipc.h
@@ -27,6 +27,8 @@
#define IPC_PORT_ALLOW_TA_CONNECT 0x1
#define IPC_PORT_ALLOW_NS_CONNECT 0x2
+#define IPC_CONNECT_WAIT_FOR_PORT 0x1
+
#define IPC_HANDLE_POLL_HUP 0x1
#define IPC_HANDLE_POLL_MSG 0x2
#define IPC_HANDLE_POLL_SEND_UNBLOCKED 0x4
diff --git a/libs/binder/trusty/usertests-inc.mk b/libs/binder/trusty/usertests-inc.mk
index 2f5a7f4..1300121 100644
--- a/libs/binder/trusty/usertests-inc.mk
+++ b/libs/binder/trusty/usertests-inc.mk
@@ -14,4 +14,6 @@
#
TRUSTY_USER_TESTS += \
+ frameworks/native/libs/binder/trusty/binderRpcTest \
frameworks/native/libs/binder/trusty/binderRpcTest/service \
+
diff --git a/opengl/libs/Android.bp b/opengl/libs/Android.bp
index 750338b..49e1cba 100644
--- a/opengl/libs/Android.bp
+++ b/opengl/libs/Android.bp
@@ -144,6 +144,7 @@
srcs: [
"EGL/BlobCache.cpp",
"EGL/FileBlobCache.cpp",
+ "EGL/MultifileBlobCache.cpp",
],
export_include_dirs: ["EGL"],
}
@@ -160,7 +161,6 @@
srcs: [
"EGL/egl_tls.cpp",
"EGL/egl_cache.cpp",
- "EGL/egl_cache_multifile.cpp",
"EGL/egl_display.cpp",
"EGL/egl_object.cpp",
"EGL/egl_layers.cpp",
@@ -205,6 +205,11 @@
srcs: [
"EGL/BlobCache.cpp",
"EGL/BlobCache_test.cpp",
+ "EGL/MultifileBlobCache.cpp",
+ "EGL/MultifileBlobCache_test.cpp",
+ ],
+ shared_libs: [
+ "libutils",
],
}
diff --git a/opengl/libs/EGL/MultifileBlobCache.cpp b/opengl/libs/EGL/MultifileBlobCache.cpp
new file mode 100644
index 0000000..99af299
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.cpp
@@ -0,0 +1,689 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+
+#include "MultifileBlobCache.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <utime.h>
+
+#include <algorithm>
+#include <chrono>
+#include <limits>
+#include <locale>
+
+#include <utils/JenkinsHash.h>
+
+using namespace std::literals;
+
+namespace {
+
+// Read the header from an already-open cache entry file to determine the size of its value
+size_t getValueSizeFromFile(int fd, const std::string& entryPath) {
+ // Read the beginning of the file to get header
+ android::MultifileHeader header;
+ size_t result = read(fd, static_cast<void*>(&header), sizeof(android::MultifileHeader));
+ if (result != sizeof(android::MultifileHeader)) {
+ ALOGE("Error reading MultifileHeader from cache entry (%s): %s", entryPath.c_str(),
+ std::strerror(errno));
+ return 0;
+ }
+
+ return header.valueSize;
+}
+
+// Helper function to close entries or free them
+void freeHotCacheEntry(android::MultifileHotCache& entry) {
+ if (entry.entryFd != -1) {
+ // If we have an fd, then this entry was added to hot cache via INIT or GET
+ // We need to unmap and close the entry
+ munmap(entry.entryBuffer, entry.entrySize);
+ close(entry.entryFd);
+ } else {
+ // Otherwise, this was added to hot cache during SET, so it was never mapped
+        // and the fd was only used by the deferred write thread.
+ delete[] entry.entryBuffer;
+ }
+}
+
+} // namespace
+
+namespace android {
+
+MultifileBlobCache::MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize,
+ const std::string& baseDir)
+ : mInitialized(false),
+ mMaxTotalSize(maxTotalSize),
+ mTotalCacheSize(0),
+ mHotCacheLimit(maxHotCacheSize),
+ mHotCacheSize(0),
+ mWorkerThreadIdle(true) {
+ if (baseDir.empty()) {
+ ALOGV("INIT: no baseDir provided in MultifileBlobCache constructor, returning early.");
+ return;
+ }
+
+ // Establish the name of our multifile directory
+ mMultifileDirName = baseDir + ".multifile";
+
+ // Set a limit for max key and value, ensuring at least one entry can always fit in hot cache
+ mMaxKeySize = mHotCacheLimit / 4;
+ mMaxValueSize = mHotCacheLimit / 2;
+
+ ALOGV("INIT: Initializing multifile blobcache with maxKeySize=%zu and maxValueSize=%zu",
+ mMaxKeySize, mMaxValueSize);
+
+ // Initialize our cache with the contents of the directory
+ mTotalCacheSize = 0;
+
+ // Create the worker thread
+ mTaskThread = std::thread(&MultifileBlobCache::processTasks, this);
+
+ // See if the dir exists, and initialize using its contents
+ struct stat st;
+ if (stat(mMultifileDirName.c_str(), &st) == 0) {
+ // Read all the files and gather details, then preload their contents
+ DIR* dir;
+ struct dirent* entry;
+ if ((dir = opendir(mMultifileDirName.c_str())) != nullptr) {
+ while ((entry = readdir(dir)) != nullptr) {
+ if (entry->d_name == "."s || entry->d_name == ".."s) {
+ continue;
+ }
+
+ std::string entryName = entry->d_name;
+ std::string fullPath = mMultifileDirName + "/" + entryName;
+
+ // The filename is the same as the entryHash
+ uint32_t entryHash = static_cast<uint32_t>(strtoul(entry->d_name, nullptr, 10));
+
+ ALOGV("INIT: Checking entry %u", entryHash);
+
+ // Look up the details of the file
+ struct stat st;
+ if (stat(fullPath.c_str(), &st) != 0) {
+ ALOGE("Failed to stat %s", fullPath.c_str());
+ return;
+ }
+
+ // Open the file so we can read its header
+ int fd = open(fullPath.c_str(), O_RDONLY);
+ if (fd == -1) {
+ ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+ std::strerror(errno));
+ return;
+ }
+
+ // Look up the details we track about each file
+ size_t valueSize = getValueSizeFromFile(fd, fullPath);
+
+ // If the cache entry is damaged or no good, remove it
+ // TODO: Perform any other checks
+ if (valueSize <= 0 || st.st_size <= 0 || st.st_atime <= 0) {
+ ALOGV("INIT: Entry %u has a problem! Removing.", entryHash);
+ if (remove(fullPath.c_str()) != 0) {
+ ALOGE("Error removing %s: %s", fullPath.c_str(), std::strerror(errno));
+ }
+ continue;
+ }
+
+ ALOGV("INIT: Entry %u is good, tracking it now.", entryHash);
+
+ // Note: Converting from off_t (signed) to size_t (unsigned)
+ size_t fileSize = static_cast<size_t>(st.st_size);
+ time_t accessTime = st.st_atime;
+
+ // Track details for rapid lookup later
+ trackEntry(entryHash, valueSize, fileSize, accessTime);
+
+ // Track the total size
+ increaseTotalCacheSize(fileSize);
+
+ // Preload the entry for fast retrieval
+ if ((mHotCacheSize + fileSize) < mHotCacheLimit) {
+ // Memory map the file
+ uint8_t* mappedEntry = reinterpret_cast<uint8_t*>(
+ mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+ if (mappedEntry == MAP_FAILED) {
+ ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+ }
+
+ ALOGV("INIT: Populating hot cache with fd = %i, cacheEntry = %p for "
+ "entryHash %u",
+ fd, mappedEntry, entryHash);
+
+ // Track the details of the preload so they can be retrieved later
+ if (!addToHotCache(entryHash, fd, mappedEntry, fileSize)) {
+ ALOGE("INIT Failed to add %u to hot cache", entryHash);
+ munmap(mappedEntry, fileSize);
+ close(fd);
+ return;
+ }
+ } else {
+ close(fd);
+ }
+ }
+ closedir(dir);
+ } else {
+ ALOGE("Unable to open filename: %s", mMultifileDirName.c_str());
+ }
+ } else {
+ // If the multifile directory does not exist, create it and start from scratch
+ if (mkdir(mMultifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
+ ALOGE("Unable to create directory (%s), errno (%i)", mMultifileDirName.c_str(), errno);
+ }
+ }
+
+ mInitialized = true;
+}
+
+MultifileBlobCache::~MultifileBlobCache() {
+ if (!mInitialized) {
+ return;
+ }
+
+ // Inform the worker thread we're done
+    ALOGV("DESTRUCTOR: Shutting down worker thread");
+ DeferredTask task(TaskCommand::Exit);
+ queueTask(std::move(task));
+
+ // Wait for it to complete
+    ALOGV("DESTRUCTOR: Waiting for worker thread to complete");
+ waitForWorkComplete();
+ if (mTaskThread.joinable()) {
+ mTaskThread.join();
+ }
+}
+
+// Set will add the entry to hot cache and start a deferred process to write it to disk
+void MultifileBlobCache::set(const void* key, EGLsizeiANDROID keySize, const void* value,
+ EGLsizeiANDROID valueSize) {
+ if (!mInitialized) {
+ return;
+ }
+
+ // Ensure key and value are under their limits
+ if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+ ALOGV("SET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+ valueSize, mMaxValueSize);
+ return;
+ }
+
+ // Generate a hash of the key and use it to track this entry
+ uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+ size_t fileSize = sizeof(MultifileHeader) + keySize + valueSize;
+
+ // If we're going to be over the cache limit, kick off a trim to clear space
+ if (getTotalSize() + fileSize > mMaxTotalSize) {
+ ALOGV("SET: Cache is full, calling trimCache to clear space");
+ trimCache(mMaxTotalSize);
+ }
+
+ ALOGV("SET: Add %u to cache", entryHash);
+
+ uint8_t* buffer = new uint8_t[fileSize];
+
+ // Write the key and value after the header
+ android::MultifileHeader header = {keySize, valueSize};
+ memcpy(static_cast<void*>(buffer), static_cast<const void*>(&header),
+ sizeof(android::MultifileHeader));
+ memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader)), static_cast<const void*>(key),
+ keySize);
+ memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader) + keySize),
+ static_cast<const void*>(value), valueSize);
+
+ std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+ // Track the size and access time for quick recall
+ trackEntry(entryHash, valueSize, fileSize, time(0));
+
+ // Update the overall cache size
+ increaseTotalCacheSize(fileSize);
+
+ // Keep the entry in hot cache for quick retrieval
+ ALOGV("SET: Adding %u to hot cache.", entryHash);
+
+ // Sending -1 as the fd indicates we don't have an fd for this
+ if (!addToHotCache(entryHash, -1, buffer, fileSize)) {
+        ALOGE("SET: Failed to add %u to hot cache", entryHash);
+ return;
+ }
+
+ // Track that we're creating a pending write for this entry
+ // Include the buffer to handle the case when multiple writes are pending for an entry
+ mDeferredWrites.insert(std::make_pair(entryHash, buffer));
+
+ // Create deferred task to write to storage
+ ALOGV("SET: Adding task to queue.");
+ DeferredTask task(TaskCommand::WriteToDisk);
+ task.initWriteToDisk(entryHash, fullPath, buffer, fileSize);
+ queueTask(std::move(task));
+}
+
+// Get will check the hot cache, then load it from disk if needed
+EGLsizeiANDROID MultifileBlobCache::get(const void* key, EGLsizeiANDROID keySize, void* value,
+ EGLsizeiANDROID valueSize) {
+ if (!mInitialized) {
+ return 0;
+ }
+
+ // Ensure key and value are under their limits
+ if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+ ALOGV("GET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+ valueSize, mMaxValueSize);
+ return 0;
+ }
+
+ // Generate a hash of the key and use it to track this entry
+ uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+ // See if we have this file
+ if (!contains(entryHash)) {
+ ALOGV("GET: Cache MISS - cache does not contain entry: %u", entryHash);
+ return 0;
+ }
+
+ // Look up the data for this entry
+ MultifileEntryStats entryStats = getEntryStats(entryHash);
+
+ size_t cachedValueSize = entryStats.valueSize;
+ if (cachedValueSize > valueSize) {
+        ALOGV("GET: Cache MISS - valueSize not large enough (%lu) for entry %u, returning required "
+ "size (%zu)",
+ valueSize, entryHash, cachedValueSize);
+ return cachedValueSize;
+ }
+
+ // We have the file and have enough room to write it out, return the entry
+ ALOGV("GET: Cache HIT - cache contains entry: %u", entryHash);
+
+ // Look up the size of the file
+ size_t fileSize = entryStats.fileSize;
+ if (keySize > fileSize) {
+ ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
+ "file",
+ keySize, fileSize);
+ return 0;
+ }
+
+ std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+ // Open the hashed filename path
+    uint8_t* cacheEntry = nullptr;
+
+ // Check hot cache
+ if (mHotCache.find(entryHash) != mHotCache.end()) {
+ ALOGV("GET: HotCache HIT for entry %u", entryHash);
+ cacheEntry = mHotCache[entryHash].entryBuffer;
+ } else {
+ ALOGV("GET: HotCache MISS for entry: %u", entryHash);
+
+ if (mDeferredWrites.find(entryHash) != mDeferredWrites.end()) {
+ // Wait for writes to complete if there is an outstanding write for this entry
+ ALOGV("GET: Waiting for write to complete for %u", entryHash);
+ waitForWorkComplete();
+ }
+
+ // Open the entry file
+ int fd = open(fullPath.c_str(), O_RDONLY);
+ if (fd == -1) {
+ ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+ std::strerror(errno));
+ return 0;
+ }
+
+ // Memory map the file
+ cacheEntry =
+ reinterpret_cast<uint8_t*>(mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+ if (cacheEntry == MAP_FAILED) {
+ ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+ close(fd);
+ return 0;
+ }
+
+ ALOGV("GET: Adding %u to hot cache", entryHash);
+ if (!addToHotCache(entryHash, fd, cacheEntry, fileSize)) {
+ ALOGE("GET: Failed to add %u to hot cache", entryHash);
+ return 0;
+ }
+
+ cacheEntry = mHotCache[entryHash].entryBuffer;
+ }
+
+ // Ensure the header matches
+ MultifileHeader* header = reinterpret_cast<MultifileHeader*>(cacheEntry);
+ if (header->keySize != keySize || header->valueSize != valueSize) {
+ ALOGW("Mismatch on keySize(%ld vs. cached %ld) or valueSize(%ld vs. cached %ld) compared "
+ "to cache header values for fullPath: %s",
+ keySize, header->keySize, valueSize, header->valueSize, fullPath.c_str());
+ removeFromHotCache(entryHash);
+ return 0;
+ }
+
+ // Compare the incoming key with our stored version (the beginning of the entry)
+ uint8_t* cachedKey = cacheEntry + sizeof(MultifileHeader);
+ int compare = memcmp(cachedKey, key, keySize);
+ if (compare != 0) {
+ ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
+ removeFromHotCache(entryHash);
+ return 0;
+ }
+
+ // Remaining entry following the key is the value
+ uint8_t* cachedValue = cacheEntry + (keySize + sizeof(MultifileHeader));
+ memcpy(value, cachedValue, cachedValueSize);
+
+ return cachedValueSize;
+}
+
+void MultifileBlobCache::finish() {
+ if (!mInitialized) {
+ return;
+ }
+
+ // Wait for all deferred writes to complete
+ ALOGV("FINISH: Waiting for work to complete.");
+ waitForWorkComplete();
+
+ // Close all entries in the hot cache
+ for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+ uint32_t entryHash = hotCacheIter->first;
+ MultifileHotCache entry = hotCacheIter->second;
+
+ ALOGV("FINISH: Closing hot cache entry for %u", entryHash);
+ freeHotCacheEntry(entry);
+
+ mHotCache.erase(hotCacheIter++);
+ }
+}
+
+void MultifileBlobCache::trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+ time_t accessTime) {
+ mEntries.insert(entryHash);
+ mEntryStats[entryHash] = {valueSize, fileSize, accessTime};
+}
+
+bool MultifileBlobCache::contains(uint32_t hashEntry) const {
+ return mEntries.find(hashEntry) != mEntries.end();
+}
+
+MultifileEntryStats MultifileBlobCache::getEntryStats(uint32_t entryHash) {
+ return mEntryStats[entryHash];
+}
+
+void MultifileBlobCache::increaseTotalCacheSize(size_t fileSize) {
+ mTotalCacheSize += fileSize;
+}
+
+void MultifileBlobCache::decreaseTotalCacheSize(size_t fileSize) {
+ mTotalCacheSize -= fileSize;
+}
+
+bool MultifileBlobCache::addToHotCache(uint32_t newEntryHash, int newFd, uint8_t* newEntryBuffer,
+ size_t newEntrySize) {
+ ALOGV("HOTCACHE(ADD): Adding %u to hot cache", newEntryHash);
+
+ // Clear space if we need to
+ if ((mHotCacheSize + newEntrySize) > mHotCacheLimit) {
+        ALOGV("HOTCACHE(ADD): mHotCacheSize (%zu) + newEntrySize (%zu) is too big for "
+              "mHotCacheLimit (%zu), freeing up space for %u",
+ mHotCacheSize, newEntrySize, mHotCacheLimit, newEntryHash);
+
+ // Wait for all the files to complete writing so our hot cache is accurate
+ waitForWorkComplete();
+
+ // Free up old entries until under the limit
+ for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+ uint32_t oldEntryHash = hotCacheIter->first;
+ MultifileHotCache oldEntry = hotCacheIter->second;
+
+ // Move our iterator before deleting the entry
+ hotCacheIter++;
+ if (!removeFromHotCache(oldEntryHash)) {
+ ALOGE("HOTCACHE(ADD): Unable to remove entry %u", oldEntryHash);
+ return false;
+ }
+
+ // Clear at least half the hot cache
+ if ((mHotCacheSize + newEntrySize) <= mHotCacheLimit / 2) {
+ ALOGV("HOTCACHE(ADD): Freed enough space for %zu", mHotCacheSize);
+ break;
+ }
+ }
+ }
+
+ // Track it
+ mHotCache[newEntryHash] = {newFd, newEntryBuffer, newEntrySize};
+ mHotCacheSize += newEntrySize;
+
+ ALOGV("HOTCACHE(ADD): New hot cache size: %zu", mHotCacheSize);
+
+ return true;
+}
+
+bool MultifileBlobCache::removeFromHotCache(uint32_t entryHash) {
+ if (mHotCache.find(entryHash) != mHotCache.end()) {
+ ALOGV("HOTCACHE(REMOVE): Removing %u from hot cache", entryHash);
+
+ // Wait for all the files to complete writing so our hot cache is accurate
+ waitForWorkComplete();
+
+ ALOGV("HOTCACHE(REMOVE): Closing hot cache entry for %u", entryHash);
+ MultifileHotCache entry = mHotCache[entryHash];
+ freeHotCacheEntry(entry);
+
+ // Delete the entry from our tracking
+ mHotCacheSize -= entry.entrySize;
+ mHotCache.erase(entryHash);
+
+ return true;
+ }
+
+ return false;
+}
+
+bool MultifileBlobCache::applyLRU(size_t cacheLimit) {
+    // Walk through the tracked entries and remove files until the cache size is under the limit
+ for (auto cacheEntryIter = mEntryStats.begin(); cacheEntryIter != mEntryStats.end();) {
+ uint32_t entryHash = cacheEntryIter->first;
+
+ ALOGV("LRU: Removing entryHash %u", entryHash);
+
+ // Track the overall size
+ MultifileEntryStats entryStats = getEntryStats(entryHash);
+ decreaseTotalCacheSize(entryStats.fileSize);
+
+ // Remove it from hot cache if present
+ removeFromHotCache(entryHash);
+
+ // Remove it from the system
+ std::string entryPath = mMultifileDirName + "/" + std::to_string(entryHash);
+ if (remove(entryPath.c_str()) != 0) {
+ ALOGE("LRU: Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
+ return false;
+ }
+
+ // Increment the iterator before clearing the entry
+ cacheEntryIter++;
+
+ // Delete the entry from our tracking
+ size_t count = mEntryStats.erase(entryHash);
+ if (count != 1) {
+ ALOGE("LRU: Failed to remove entryHash (%u) from mEntryStats", entryHash);
+ return false;
+ }
+
+ // See if it has been reduced enough
+ size_t totalCacheSize = getTotalSize();
+ if (totalCacheSize <= cacheLimit) {
+ // Success
+ ALOGV("LRU: Reduced cache to %zu", totalCacheSize);
+ return true;
+ }
+ }
+
+    ALOGV("LRU: Cache is empty");
+ return false;
+}
+
+// When removing files, this divisor determines what fraction of the overall limit to trim down to.
+// A divisor of two will decrease the cache to 50%, four to 25%, and so on.
+constexpr uint32_t kCacheLimitDivisor = 2;
+
+// Calculate the cache size and remove old entries until under the limit
+void MultifileBlobCache::trimCache(size_t cacheByteLimit) {
+ // Start with the value provided by egl_cache
+ size_t limit = cacheByteLimit;
+
+ // Wait for all deferred writes to complete
+ waitForWorkComplete();
+
+ size_t size = getTotalSize();
+
+ // If size is larger than the threshold, remove files using LRU
+ if (size > limit) {
+ ALOGV("TRIM: Multifile cache size is larger than %zu, removing old entries",
+ cacheByteLimit);
+ if (!applyLRU(limit / kCacheLimitDivisor)) {
+ ALOGE("Error when clearing multifile shader cache");
+ return;
+ }
+ }
+}
+
+// This function performs a task. It only knows how to write files to disk,
+// but it could be expanded if needed.
+void MultifileBlobCache::processTask(DeferredTask& task) {
+ switch (task.getTaskCommand()) {
+ case TaskCommand::Exit: {
+ ALOGV("DEFERRED: Shutting down");
+ return;
+ }
+ case TaskCommand::WriteToDisk: {
+ uint32_t entryHash = task.getEntryHash();
+ std::string& fullPath = task.getFullPath();
+ uint8_t* buffer = task.getBuffer();
+ size_t bufferSize = task.getBufferSize();
+
+ // Create the file or reset it if already present, read+write for user only
+ int fd = open(fullPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+ if (fd == -1) {
+ ALOGE("Cache error in SET - failed to open fullPath: %s, error: %s",
+ fullPath.c_str(), std::strerror(errno));
+ return;
+ }
+
+ ALOGV("DEFERRED: Opened fd %i from %s", fd, fullPath.c_str());
+
+ ssize_t result = write(fd, buffer, bufferSize);
+ if (result != bufferSize) {
+                ALOGE("Error writing buffer to cache entry (%s): %s", fullPath.c_str(),
+ std::strerror(errno));
+ return;
+ }
+
+ ALOGV("DEFERRED: Completed write for: %s", fullPath.c_str());
+ close(fd);
+
+ // Erase the entry from mDeferredWrites
+ // Since there could be multiple outstanding writes for an entry, find the matching one
+ typedef std::multimap<uint32_t, uint8_t*>::iterator entryIter;
+ std::pair<entryIter, entryIter> iterPair = mDeferredWrites.equal_range(entryHash);
+ for (entryIter it = iterPair.first; it != iterPair.second; ++it) {
+ if (it->second == buffer) {
+ ALOGV("DEFERRED: Marking write complete for %u at %p", it->first, it->second);
+ mDeferredWrites.erase(it);
+ break;
+ }
+ }
+
+ return;
+ }
+ default: {
+ ALOGE("DEFERRED: Unhandled task type");
+ return;
+ }
+ }
+}
+
+// This function will wait until tasks arrive, then execute them
+// If the exit command is submitted, the loop will terminate
+void MultifileBlobCache::processTasksImpl(bool* exitThread) {
+ while (true) {
+ std::unique_lock<std::mutex> lock(mWorkerMutex);
+ if (mTasks.empty()) {
+ ALOGV("WORKER: No tasks available, waiting");
+ mWorkerThreadIdle = true;
+ mWorkerIdleCondition.notify_all();
+ // Only wake if notified and command queue is not empty
+ mWorkAvailableCondition.wait(lock, [this] { return !mTasks.empty(); });
+ }
+
+ ALOGV("WORKER: Task available, waking up.");
+ mWorkerThreadIdle = false;
+ DeferredTask task = std::move(mTasks.front());
+ mTasks.pop();
+
+ if (task.getTaskCommand() == TaskCommand::Exit) {
+ ALOGV("WORKER: Exiting work loop.");
+ *exitThread = true;
+ mWorkerThreadIdle = true;
+ mWorkerIdleCondition.notify_one();
+ return;
+ }
+
+ lock.unlock();
+ processTask(task);
+ }
+}
+
+// Process tasks until the exit task is submitted
+void MultifileBlobCache::processTasks() {
+ while (true) {
+ bool exitThread = false;
+ processTasksImpl(&exitThread);
+ if (exitThread) {
+ break;
+ }
+ }
+}
+
+// Add a task to the queue to be processed by the worker thread
+void MultifileBlobCache::queueTask(DeferredTask&& task) {
+ std::lock_guard<std::mutex> queueLock(mWorkerMutex);
+ mTasks.emplace(std::move(task));
+ mWorkAvailableCondition.notify_one();
+}
+
+// Wait until all tasks have been completed
+void MultifileBlobCache::waitForWorkComplete() {
+ std::unique_lock<std::mutex> lock(mWorkerMutex);
+ mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
+}
+
+}; // namespace android
\ No newline at end of file
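
Each cache entry written by set() above is a standalone file named after the Jenkins hash of its key and laid out as [MultifileHeader][key bytes][value bytes]. A minimal sketch of parsing one such file offline (the helper is hypothetical, error handling is trimmed, and MultifileHeader is assumed to be visible via the header that follows):

// Sketch only: parses a single multifile cache entry laid out as
// [MultifileHeader][key][value]; assumes access to the MultifileHeader definition.
#include <fcntl.h>
#include <unistd.h>

#include <cstdint>
#include <string>
#include <vector>

#include "MultifileBlobCache.h"

static std::vector<uint8_t> readCachedValue(const std::string& entryPath) {
    int fd = open(entryPath.c_str(), O_RDONLY);
    if (fd == -1) return {};

    android::MultifileHeader header;
    if (read(fd, &header, sizeof(header)) != static_cast<ssize_t>(sizeof(header))) {
        close(fd);
        return {};
    }

    // Skip over the stored key; the value immediately follows it.
    lseek(fd, header.keySize, SEEK_CUR);
    std::vector<uint8_t> value(header.valueSize);
    if (read(fd, value.data(), value.size()) != static_cast<ssize_t>(value.size())) {
        value.clear();
    }
    close(fd);
    return value;
}
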
diff --git a/opengl/libs/EGL/MultifileBlobCache.h b/opengl/libs/EGL/MultifileBlobCache.h
new file mode 100644
index 0000000..c0cc9dc
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.h
@@ -0,0 +1,167 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MULTIFILE_BLOB_CACHE_H
+#define ANDROID_MULTIFILE_BLOB_CACHE_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <future>
+#include <map>
+#include <queue>
+#include <string>
+#include <thread>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+
+struct MultifileHeader {
+ EGLsizeiANDROID keySize;
+ EGLsizeiANDROID valueSize;
+};
+
+struct MultifileEntryStats {
+ EGLsizeiANDROID valueSize;
+ size_t fileSize;
+ time_t accessTime;
+};
+
+struct MultifileHotCache {
+ int entryFd;
+ uint8_t* entryBuffer;
+ size_t entrySize;
+};
+
+enum class TaskCommand {
+ Invalid = 0,
+ WriteToDisk,
+ Exit,
+};
+
+class DeferredTask {
+public:
+ DeferredTask(TaskCommand command)
+ : mCommand(command), mEntryHash(0), mBuffer(nullptr), mBufferSize(0) {}
+
+ TaskCommand getTaskCommand() { return mCommand; }
+
+ void initWriteToDisk(uint32_t entryHash, std::string fullPath, uint8_t* buffer,
+ size_t bufferSize) {
+ mCommand = TaskCommand::WriteToDisk;
+ mEntryHash = entryHash;
+ mFullPath = std::move(fullPath);
+ mBuffer = buffer;
+ mBufferSize = bufferSize;
+ }
+
+ uint32_t getEntryHash() { return mEntryHash; }
+ std::string& getFullPath() { return mFullPath; }
+ uint8_t* getBuffer() { return mBuffer; }
+    size_t getBufferSize() { return mBufferSize; }
+
+private:
+ TaskCommand mCommand;
+
+ // Parameters for WriteToDisk
+ uint32_t mEntryHash;
+ std::string mFullPath;
+ uint8_t* mBuffer;
+ size_t mBufferSize;
+};
+
+class MultifileBlobCache {
+public:
+ MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize, const std::string& baseDir);
+ ~MultifileBlobCache();
+
+ void set(const void* key, EGLsizeiANDROID keySize, const void* value,
+ EGLsizeiANDROID valueSize);
+ EGLsizeiANDROID get(const void* key, EGLsizeiANDROID keySize, void* value,
+ EGLsizeiANDROID valueSize);
+
+ void finish();
+
+ size_t getTotalSize() const { return mTotalCacheSize; }
+
+private:
+ void trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+ time_t accessTime);
+ bool contains(uint32_t entryHash) const;
+ bool removeEntry(uint32_t entryHash);
+ MultifileEntryStats getEntryStats(uint32_t entryHash);
+
+ size_t getFileSize(uint32_t entryHash);
+ size_t getValueSize(uint32_t entryHash);
+
+ void increaseTotalCacheSize(size_t fileSize);
+ void decreaseTotalCacheSize(size_t fileSize);
+
+    bool addToHotCache(uint32_t entryHash, int fd, uint8_t* entryBuffer, size_t entrySize);
+ bool removeFromHotCache(uint32_t entryHash);
+
+ void trimCache(size_t cacheByteLimit);
+ bool applyLRU(size_t cacheLimit);
+
+ bool mInitialized;
+ std::string mMultifileDirName;
+
+ std::unordered_set<uint32_t> mEntries;
+ std::unordered_map<uint32_t, MultifileEntryStats> mEntryStats;
+ std::unordered_map<uint32_t, MultifileHotCache> mHotCache;
+
+ size_t mMaxKeySize;
+ size_t mMaxValueSize;
+ size_t mMaxTotalSize;
+ size_t mTotalCacheSize;
+ size_t mHotCacheLimit;
+ size_t mHotCacheEntryLimit;
+ size_t mHotCacheSize;
+
+ // Below are the components used for deferred writes
+
+ // Track whether we have pending writes for an entry
+ std::multimap<uint32_t, uint8_t*> mDeferredWrites;
+
+ // Functions to work through tasks in the queue
+ void processTasks();
+ void processTasksImpl(bool* exitThread);
+ void processTask(DeferredTask& task);
+
+ // Used by main thread to create work for the worker thread
+ void queueTask(DeferredTask&& task);
+
+ // Used by main thread to wait for worker thread to complete all outstanding work.
+ void waitForWorkComplete();
+
+ std::thread mTaskThread;
+ std::queue<DeferredTask> mTasks;
+ std::mutex mWorkerMutex;
+
+ // This condition will block the worker thread until a task is queued
+ std::condition_variable mWorkAvailableCondition;
+
+ // This condition will block the main thread while the worker thread still has tasks
+ std::condition_variable mWorkerIdleCondition;
+
+ // This bool will track whether all tasks have been completed
+ bool mWorkerThreadIdle;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MULTIFILE_BLOB_CACHE_H
diff --git a/opengl/libs/EGL/MultifileBlobCache_test.cpp b/opengl/libs/EGL/MultifileBlobCache_test.cpp
new file mode 100644
index 0000000..1a55a4f
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache_test.cpp
@@ -0,0 +1,200 @@
+/*
+ ** Copyright 2023, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/test_utils.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <stdio.h>
+
+#include <memory>
+
+namespace android {
+
+template <typename T>
+using sp = std::shared_ptr<T>;
+
+constexpr size_t kMaxTotalSize = 32 * 1024;
+constexpr size_t kMaxPreloadSize = 8 * 1024;
+
+constexpr size_t kMaxKeySize = kMaxPreloadSize / 4;
+constexpr size_t kMaxValueSize = kMaxPreloadSize / 2;
+
+class MultifileBlobCacheTest : public ::testing::Test {
+protected:
+ virtual void SetUp() {
+ mTempFile.reset(new TemporaryFile());
+ mMBC.reset(new MultifileBlobCache(kMaxTotalSize, kMaxPreloadSize, &mTempFile->path[0]));
+ }
+
+ virtual void TearDown() { mMBC.reset(); }
+
+ std::unique_ptr<TemporaryFile> mTempFile;
+ std::unique_ptr<MultifileBlobCache> mMBC;
+};
+
+TEST_F(MultifileBlobCacheTest, CacheSingleValueSucceeds) {
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+ ASSERT_EQ('g', buf[2]);
+ ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheTwoValuesSucceeds) {
+ unsigned char buf[2] = {0xee, 0xee};
+ mMBC->set("ab", 2, "cd", 2);
+ mMBC->set("ef", 2, "gh", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('c', buf[0]);
+ ASSERT_EQ('d', buf[1]);
+ ASSERT_EQ(size_t(2), mMBC->get("ef", 2, buf, 2));
+ ASSERT_EQ('g', buf[0]);
+ ASSERT_EQ('h', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetSetTwiceSucceeds) {
+ unsigned char buf[2] = {0xee, 0xee};
+ mMBC->set("ab", 2, "cd", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('c', buf[0]);
+ ASSERT_EQ('d', buf[1]);
+ // Use the same key, but different value
+ mMBC->set("ab", 2, "ef", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesInsideBounds) {
+ unsigned char buf[6] = {0xee, 0xee, 0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf + 1, 4));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ('e', buf[1]);
+ ASSERT_EQ('f', buf[2]);
+ ASSERT_EQ('g', buf[3]);
+ ASSERT_EQ('h', buf[4]);
+ ASSERT_EQ(0xee, buf[5]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesIfBufferIsLargeEnough) {
+ unsigned char buf[3] = {0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 3));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ(0xee, buf[1]);
+ ASSERT_EQ(0xee, buf[2]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetDoesntAccessNullBuffer) {
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, nullptr, 0));
+}
+
+TEST_F(MultifileBlobCacheTest, MultipleSetsCacheLatestValue) {
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ mMBC->set("abcd", 4, "ijkl", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('i', buf[0]);
+ ASSERT_EQ('j', buf[1]);
+ ASSERT_EQ('k', buf[2]);
+ ASSERT_EQ('l', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, SecondSetKeepsFirstValueIfTooLarge) {
+ unsigned char buf[kMaxValueSize + 1] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+ ASSERT_EQ('g', buf[2]);
+ ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfKeyIsTooBig) {
+ char key[kMaxKeySize + 1];
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ for (int i = 0; i < kMaxKeySize + 1; i++) {
+ key[i] = 'a';
+ }
+ mMBC->set(key, kMaxKeySize + 1, "bbbb", 4);
+ ASSERT_EQ(size_t(0), mMBC->get(key, kMaxKeySize + 1, buf, 4));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ(0xee, buf[1]);
+ ASSERT_EQ(0xee, buf[2]);
+ ASSERT_EQ(0xee, buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfValueIsTooBig) {
+ char buf[kMaxValueSize + 1];
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ buf[i] = 'b';
+ }
+ mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ buf[i] = 0xee;
+ }
+ ASSERT_EQ(size_t(0), mMBC->get("abcd", 4, buf, kMaxValueSize + 1));
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ SCOPED_TRACE(i);
+ ASSERT_EQ(0xee, buf[i]);
+ }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxKeySizeSucceeds) {
+ char key[kMaxKeySize];
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ for (int i = 0; i < kMaxKeySize; i++) {
+ key[i] = 'a';
+ }
+ mMBC->set(key, kMaxKeySize, "wxyz", 4);
+ ASSERT_EQ(size_t(4), mMBC->get(key, kMaxKeySize, buf, 4));
+ ASSERT_EQ('w', buf[0]);
+ ASSERT_EQ('x', buf[1]);
+ ASSERT_EQ('y', buf[2]);
+ ASSERT_EQ('z', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxValueSizeSucceeds) {
+ char buf[kMaxValueSize];
+ for (int i = 0; i < kMaxValueSize; i++) {
+ buf[i] = 'b';
+ }
+ mMBC->set("abcd", 4, buf, kMaxValueSize);
+ for (int i = 0; i < kMaxValueSize; i++) {
+ buf[i] = 0xee;
+ }
+ mMBC->get("abcd", 4, buf, kMaxValueSize);
+ for (int i = 0; i < kMaxValueSize; i++) {
+ SCOPED_TRACE(i);
+ ASSERT_EQ('b', buf[i]);
+ }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMinKeyAndValueSizeSucceeds) {
+ unsigned char buf[1] = {0xee};
+ mMBC->set("x", 1, "y", 1);
+ ASSERT_EQ(size_t(1), mMBC->get("x", 1, buf, 1));
+ ASSERT_EQ('y', buf[0]);
+}
+
+} // namespace android
diff --git a/opengl/libs/EGL/egl_cache.cpp b/opengl/libs/EGL/egl_cache.cpp
index 1e8a348..b00ee33 100644
--- a/opengl/libs/EGL/egl_cache.cpp
+++ b/opengl/libs/EGL/egl_cache.cpp
@@ -14,6 +14,8 @@
** limitations under the License.
*/
+// #define LOG_NDEBUG 0
+
#include "egl_cache.h"
#include <android-base/properties.h>
@@ -25,22 +27,19 @@
#include <thread>
#include "../egl_impl.h"
-#include "egl_cache_multifile.h"
#include "egl_display.h"
// Monolithic cache size limits.
-static const size_t maxKeySize = 12 * 1024;
-static const size_t maxValueSize = 64 * 1024;
-static const size_t maxTotalSize = 32 * 1024 * 1024;
+static const size_t kMaxMonolithicKeySize = 12 * 1024;
+static const size_t kMaxMonolithicValueSize = 64 * 1024;
+static const size_t kMaxMonolithicTotalSize = 2 * 1024 * 1024;
// The time in seconds to wait before saving newly inserted monolithic cache entries.
-static const unsigned int deferredSaveDelay = 4;
+static const unsigned int kDeferredMonolithicSaveDelay = 4;
-// Multifile cache size limit
-constexpr size_t kMultifileCacheByteLimit = 64 * 1024 * 1024;
-
-// Delay before cleaning up multifile cache entries
-static const unsigned int deferredMultifileCleanupDelaySeconds = 1;
+// Multifile cache size limits
+constexpr uint32_t kMultifileHotCacheLimit = 8 * 1024 * 1024;
+constexpr uint32_t kMultifileCacheByteLimit = 32 * 1024 * 1024;
namespace android {
@@ -68,10 +67,7 @@
// egl_cache_t definition
//
egl_cache_t::egl_cache_t()
- : mInitialized(false),
- mMultifileMode(false),
- mCacheByteLimit(maxTotalSize),
- mMultifileCleanupPending(false) {}
+ : mInitialized(false), mMultifileMode(false), mCacheByteLimit(kMaxMonolithicTotalSize) {}
egl_cache_t::~egl_cache_t() {}
@@ -85,7 +81,7 @@
std::lock_guard<std::mutex> lock(mMutex);
egl_connection_t* const cnx = &gEGLImpl;
- if (cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
+ if (display && cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
const char* exts = display->disp.queryString.extensions;
size_t bcExtLen = strlen(BC_EXT_STR);
size_t extsLen = strlen(exts);
@@ -114,14 +110,36 @@
}
}
- // Allow forcing monolithic cache for debug purposes
- if (base::GetProperty("debug.egl.blobcache.multifilemode", "") == "false") {
- ALOGD("Forcing monolithic cache due to debug.egl.blobcache.multifilemode == \"false\"");
+ // Check the device config to decide whether multifile should be used
+ if (base::GetBoolProperty("ro.egl.blobcache.multifile", false)) {
+ mMultifileMode = true;
+ ALOGV("Using multifile EGL blobcache");
+ }
+
+ // Allow forcing the mode for debug purposes
+ std::string mode = base::GetProperty("debug.egl.blobcache.multifile", "");
+ if (mode == "true") {
+ ALOGV("Forcing multifile cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
+ mMultifileMode = true;
+ } else if (mode == "false") {
+ ALOGV("Forcing monolithic cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
mMultifileMode = false;
}
if (mMultifileMode) {
- mCacheByteLimit = kMultifileCacheByteLimit;
+ mCacheByteLimit = static_cast<size_t>(
+ base::GetUintProperty<uint32_t>("ro.egl.blobcache.multifile_limit",
+ kMultifileCacheByteLimit));
+
+ // Check for a debug value
+ int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.multifile_limit", -1);
+ if (debugCacheSize >= 0) {
+ ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.multifile_limit",
+ mCacheByteLimit, debugCacheSize);
+ mCacheByteLimit = debugCacheSize;
+ }
+
+ ALOGV("Using multifile EGL blobcache limit of %zu bytes", mCacheByteLimit);
}
mInitialized = true;
@@ -133,10 +151,10 @@
mBlobCache->writeToFile();
}
mBlobCache = nullptr;
- if (mMultifileMode) {
- checkMultifileCacheSize(mCacheByteLimit);
+ if (mMultifileBlobCache) {
+ mMultifileBlobCache->finish();
}
- mMultifileMode = false;
+ mMultifileBlobCache = nullptr;
mInitialized = false;
}
@@ -151,20 +169,8 @@
if (mInitialized) {
if (mMultifileMode) {
- setBlobMultifile(key, keySize, value, valueSize, mFilename);
-
- if (!mMultifileCleanupPending) {
- mMultifileCleanupPending = true;
- // Kick off a thread to cull cache files below limit
- std::thread deferredMultifileCleanupThread([this]() {
- sleep(deferredMultifileCleanupDelaySeconds);
- std::lock_guard<std::mutex> lock(mMutex);
- // Check the size of cache and remove entries to stay under limit
- checkMultifileCacheSize(mCacheByteLimit);
- mMultifileCleanupPending = false;
- });
- deferredMultifileCleanupThread.detach();
- }
+ MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+ mbc->set(key, keySize, value, valueSize);
} else {
BlobCache* bc = getBlobCacheLocked();
bc->set(key, keySize, value, valueSize);
@@ -172,7 +178,7 @@
if (!mSavePending) {
mSavePending = true;
std::thread deferredSaveThread([this]() {
- sleep(deferredSaveDelay);
+ sleep(kDeferredMonolithicSaveDelay);
std::lock_guard<std::mutex> lock(mMutex);
if (mInitialized && mBlobCache) {
mBlobCache->writeToFile();
@@ -196,15 +202,21 @@
if (mInitialized) {
if (mMultifileMode) {
- return getBlobMultifile(key, keySize, value, valueSize, mFilename);
+ MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+ return mbc->get(key, keySize, value, valueSize);
} else {
BlobCache* bc = getBlobCacheLocked();
return bc->get(key, keySize, value, valueSize);
}
}
+
return 0;
}
+void egl_cache_t::setCacheMode(EGLCacheMode cacheMode) {
+ mMultifileMode = (cacheMode == EGLCacheMode::Multifile);
+}
+
void egl_cache_t::setCacheFilename(const char* filename) {
std::lock_guard<std::mutex> lock(mMutex);
mFilename = filename;
@@ -216,7 +228,7 @@
if (!mMultifileMode) {
// If we're not in multifile mode, ensure the cache limit is only being lowered,
// not increasing above the hard coded platform limit
- if (cacheByteLimit > maxTotalSize) {
+ if (cacheByteLimit > kMaxMonolithicTotalSize) {
return;
}
}
@@ -226,8 +238,8 @@
size_t egl_cache_t::getCacheSize() {
std::lock_guard<std::mutex> lock(mMutex);
- if (mMultifileMode) {
- return getMultifileCacheSize();
+ if (mMultifileBlobCache) {
+ return mMultifileBlobCache->getTotalSize();
}
if (mBlobCache) {
return mBlobCache->getSize();
@@ -237,9 +249,18 @@
BlobCache* egl_cache_t::getBlobCacheLocked() {
if (mBlobCache == nullptr) {
- mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, mCacheByteLimit, mFilename));
+ mBlobCache.reset(new FileBlobCache(kMaxMonolithicKeySize, kMaxMonolithicValueSize,
+ mCacheByteLimit, mFilename));
}
return mBlobCache.get();
}
+MultifileBlobCache* egl_cache_t::getMultifileBlobCacheLocked() {
+ if (mMultifileBlobCache == nullptr) {
+ mMultifileBlobCache.reset(
+ new MultifileBlobCache(mCacheByteLimit, kMultifileHotCacheLimit, mFilename));
+ }
+ return mMultifileBlobCache.get();
+}
+
}; // namespace android
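
The egl_cache.cpp hunk above resolves the cache configuration from three system properties: ro.egl.blobcache.multifile picks the product default, debug.egl.blobcache.multifile can force either mode, and ro./debug.egl.blobcache.multifile_limit select the byte limit, with the debug value applied last. A minimal sketch of that precedence, assuming the android-base property helpers used in the hunk and a hypothetical resolveCacheConfig() helper:

#include <android-base/properties.h>

#include <cstddef>
#include <cstdint>
#include <string>

struct CacheConfig {
    bool multifile = false;
    size_t byteLimit = 0;
};

// Resolve the blobcache configuration the same way the hunk above does:
// the ro. properties pick the product defaults, the debug. properties override them.
static CacheConfig resolveCacheConfig(size_t defaultLimit) {
    CacheConfig config;
    config.multifile = android::base::GetBoolProperty("ro.egl.blobcache.multifile", false);

    const std::string mode = android::base::GetProperty("debug.egl.blobcache.multifile", "");
    if (mode == "true") {
        config.multifile = true;
    } else if (mode == "false") {
        config.multifile = false;
    }

    config.byteLimit = static_cast<size_t>(
            android::base::GetUintProperty<uint32_t>("ro.egl.blobcache.multifile_limit",
                                                     static_cast<uint32_t>(defaultLimit)));
    const int debugLimit =
            android::base::GetIntProperty("debug.egl.blobcache.multifile_limit", -1);
    if (debugLimit >= 0) {
        config.byteLimit = static_cast<size_t>(debugLimit);
    }
    return config;
}

Because the debug properties are read after the ro. properties, a device can flip the mode or limit at runtime without rebuilding the image.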
diff --git a/opengl/libs/EGL/egl_cache.h b/opengl/libs/EGL/egl_cache.h
index 2dcd803..1399368 100644
--- a/opengl/libs/EGL/egl_cache.h
+++ b/opengl/libs/EGL/egl_cache.h
@@ -25,6 +25,7 @@
#include <string>
#include "FileBlobCache.h"
+#include "MultifileBlobCache.h"
namespace android {
@@ -32,6 +33,11 @@
class EGLAPI egl_cache_t {
public:
+ enum class EGLCacheMode {
+ Monolithic,
+ Multifile,
+ };
+
// get returns a pointer to the singleton egl_cache_t object. This
// singleton object will never be destroyed.
static egl_cache_t* get();
@@ -64,6 +70,9 @@
// cache contents from one program invocation to another.
void setCacheFilename(const char* filename);
+ // Allow setting monolithic or multifile modes
+ void setCacheMode(EGLCacheMode cacheMode);
+
// Allow the fixed cache limit to be overridden
void setCacheLimit(int64_t cacheByteLimit);
@@ -85,6 +94,9 @@
// possible.
BlobCache* getBlobCacheLocked();
+ // Get or create the multifile blobcache
+ MultifileBlobCache* getMultifileBlobCacheLocked();
+
// mInitialized indicates whether the egl_cache_t is in the initialized
// state. It is initialized to false at construction time, and gets set to
// true when initialize is called. It is set back to false when terminate
@@ -98,6 +110,9 @@
// first time it's needed.
std::unique_ptr<FileBlobCache> mBlobCache;
+ // The multifile version of blobcache allowing larger contents to be stored
+ std::unique_ptr<MultifileBlobCache> mMultifileBlobCache;
+
// mFilename is the name of the file for storing cache contents in between
// program invocations. It is initialized to an empty string at
// construction time, and can be set with the setCacheFilename method. An
@@ -123,11 +138,7 @@
bool mMultifileMode;
// Cache limit
- int64_t mCacheByteLimit;
-
- // Whether we've kicked off a side thread that will check the multifile
- // cache size and remove entries if needed.
- bool mMultifileCleanupPending;
+ size_t mCacheByteLimit;
};
}; // namespace android
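
With the new EGLCacheMode enum and setCacheMode() declared above, a caller can choose the backend on the process-wide singleton before a display initializes it, as the updated tests do. A small hypothetical usage sketch (the function name and limit value are illustrative only):

#include "egl_cache.h"

// Hypothetical caller; pick the backend before initialize() is invoked.
void configureShaderCache(bool useMultifile) {
    android::egl_cache_t* cache = android::egl_cache_t::get();
    cache->setCacheMode(useMultifile ? android::egl_cache_t::EGLCacheMode::Multifile
                                     : android::egl_cache_t::EGLCacheMode::Monolithic);
    cache->setCacheLimit(1024);  // bytes; the updated tests use the same setter
}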
diff --git a/opengl/libs/EGL/egl_cache_multifile.cpp b/opengl/libs/EGL/egl_cache_multifile.cpp
deleted file mode 100644
index 48e557f..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-
-#include "egl_cache_multifile.h"
-
-#include <android-base/properties.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <log/log.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <utime.h>
-
-#include <algorithm>
-#include <chrono>
-#include <fstream>
-#include <limits>
-#include <locale>
-#include <map>
-#include <sstream>
-#include <unordered_map>
-
-#include <utils/JenkinsHash.h>
-
-static std::string multifileDirName = "";
-
-using namespace std::literals;
-
-namespace {
-
-// Create a directory for tracking multiple files
-void setupMultifile(const std::string& baseDir) {
- // If we've already set up the multifile dir in this base directory, we're done
- if (!multifileDirName.empty() && multifileDirName.find(baseDir) != std::string::npos) {
- return;
- }
-
- // Otherwise, create it
- multifileDirName = baseDir + ".multifile";
- if (mkdir(multifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
- ALOGW("Unable to create directory (%s), errno (%i)", multifileDirName.c_str(), errno);
- }
-}
-
-// Create a filename that is based on the hash of the key
-std::string getCacheEntryFilename(const void* key, EGLsizeiANDROID keySize,
- const std::string& baseDir) {
- // Hash the key into a string
- std::stringstream keyName;
- keyName << android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
-
- // Build a filename using dir and hash
- return baseDir + "/" + keyName.str();
-}
-
-// Determine file age based on stat modification time
-// Newer files have a higher age (time since epoch)
-time_t getFileAge(const std::string& filePath) {
- struct stat st;
- if (stat(filePath.c_str(), &st) == 0) {
- ALOGD("getFileAge returning %" PRId64 " for file age", static_cast<uint64_t>(st.st_mtime));
- return st.st_mtime;
- } else {
- ALOGW("Failed to stat %s", filePath.c_str());
- return 0;
- }
-}
-
-size_t getFileSize(const std::string& filePath) {
- struct stat st;
- if (stat(filePath.c_str(), &st) != 0) {
- ALOGE("Unable to stat %s", filePath.c_str());
- return 0;
- }
- return st.st_size;
-}
-
-// Walk through directory entries and track age and size
-// Then iterate through the entries, oldest first, and remove them until under the limit.
-// This will need to be updated if we move to a multilevel cache dir.
-bool applyLRU(size_t cacheLimit) {
- // Build a multimap of files indexed by age.
- // They will be automatically sorted smallest (oldest) to largest (newest)
- std::multimap<time_t, std::string> agesToFiles;
-
- // Map files to sizes
- std::unordered_map<std::string, size_t> filesToSizes;
-
- size_t totalCacheSize = 0;
-
- DIR* dir;
- struct dirent* entry;
- if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
- while ((entry = readdir(dir)) != nullptr) {
- if (entry->d_name == "."s || entry->d_name == ".."s) {
- continue;
- }
-
- // Look up each file age
- std::string fullPath = multifileDirName + "/" + entry->d_name;
- time_t fileAge = getFileAge(fullPath);
-
- // Track the files, sorted by age
- agesToFiles.insert(std::make_pair(fileAge, fullPath));
-
- // Also track the size so we know how much room we have freed
- size_t fileSize = getFileSize(fullPath);
- filesToSizes[fullPath] = fileSize;
- totalCacheSize += fileSize;
- }
- closedir(dir);
- } else {
- ALOGE("Unable to open filename: %s", multifileDirName.c_str());
- return false;
- }
-
- if (totalCacheSize <= cacheLimit) {
- // If LRU was called on a sufficiently small cache, no need to remove anything
- return true;
- }
-
- // Walk through the map of files until we're under the cache size
- for (const auto& cacheEntryIter : agesToFiles) {
- time_t entryAge = cacheEntryIter.first;
- const std::string entryPath = cacheEntryIter.second;
-
- ALOGD("Removing %s with age %ld", entryPath.c_str(), entryAge);
- if (std::remove(entryPath.c_str()) != 0) {
- ALOGE("Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
- return false;
- }
-
- totalCacheSize -= filesToSizes[entryPath];
- if (totalCacheSize <= cacheLimit) {
- // Success
- ALOGV("Reduced cache to %zu", totalCacheSize);
- return true;
- } else {
- ALOGD("Cache size is still too large (%zu), removing more files", totalCacheSize);
- }
- }
-
- // Should never reach this return
- return false;
-}
-
-} // namespace
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir) {
- if (baseDir.empty()) {
- return;
- }
-
- setupMultifile(baseDir);
- std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
- ALOGD("Attempting to open filename for set: %s", filename.c_str());
- std::ofstream outfile(filename, std::ofstream::binary);
- if (outfile.fail()) {
- ALOGW("Unable to open filename: %s", filename.c_str());
- return;
- }
-
- // First write the key
- outfile.write(static_cast<const char*>(key), keySize);
- if (outfile.bad()) {
- ALOGW("Unable to write key to filename: %s", filename.c_str());
- outfile.close();
- return;
- }
- ALOGD("Wrote %i bytes to out file for key", static_cast<int>(outfile.tellp()));
-
- // Then write the value
- outfile.write(static_cast<const char*>(value), valueSize);
- if (outfile.bad()) {
- ALOGW("Unable to write value to filename: %s", filename.c_str());
- outfile.close();
- return;
- }
- ALOGD("Wrote %i bytes to out file for full entry", static_cast<int>(outfile.tellp()));
-
- outfile.close();
-}
-
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir) {
- if (baseDir.empty()) {
- return 0;
- }
-
- setupMultifile(baseDir);
- std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
- // Open the hashed filename path
- ALOGD("Attempting to open filename for get: %s", filename.c_str());
- int fd = open(filename.c_str(), O_RDONLY);
-
- // File doesn't exist, this is a MISS, return zero bytes read
- if (fd == -1) {
- ALOGD("Cache MISS - failed to open filename: %s, error: %s", filename.c_str(),
- std::strerror(errno));
- return 0;
- }
-
- ALOGD("Cache HIT - opened filename: %s", filename.c_str());
-
- // Get the size of the file
- size_t entrySize = getFileSize(filename);
- if (keySize > entrySize) {
- ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
- "file",
- keySize, entrySize);
- close(fd);
- return 0;
- }
-
- // Memory map the file
- uint8_t* cacheEntry =
- reinterpret_cast<uint8_t*>(mmap(nullptr, entrySize, PROT_READ, MAP_PRIVATE, fd, 0));
- if (cacheEntry == MAP_FAILED) {
- ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
- close(fd);
- return 0;
- }
-
- // Compare the incoming key with our stored version (the beginning of the entry)
- int compare = memcmp(cacheEntry, key, keySize);
- if (compare != 0) {
- ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
- munmap(cacheEntry, entrySize);
- close(fd);
- return 0;
- }
-
- // Keys matched, so remaining cache is value size
- size_t cachedValueSize = entrySize - keySize;
-
- // Return actual value size if valueSize is not large enough
- if (cachedValueSize > valueSize) {
- ALOGD("Skipping file read, not enough room provided (valueSize): %lu, "
- "returning required space as %zu",
- valueSize, cachedValueSize);
- munmap(cacheEntry, entrySize);
- close(fd);
- return cachedValueSize;
- }
-
- // Remaining entry following the key is the value
- uint8_t* cachedValue = cacheEntry + keySize;
- memcpy(value, cachedValue, cachedValueSize);
- munmap(cacheEntry, entrySize);
- close(fd);
-
- ALOGD("Read %zu bytes from %s", cachedValueSize, filename.c_str());
- return cachedValueSize;
-}
-
-// Walk through the files in our flat directory, checking the size of each one.
-// Return the total size of normal files in the directory.
-// This will need to be updated if we move to a multilevel cache dir.
-size_t getMultifileCacheSize() {
- if (multifileDirName.empty()) {
- return 0;
- }
-
- DIR* dir;
- struct dirent* entry;
- size_t size = 0;
-
- ALOGD("Using %s as the multifile cache dir ", multifileDirName.c_str());
-
- if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
- while ((entry = readdir(dir)) != nullptr) {
- if (entry->d_name == "."s || entry->d_name == ".."s) {
- continue;
- }
-
- // Add up the size of all files in the dir
- std::string fullPath = multifileDirName + "/" + entry->d_name;
- size += getFileSize(fullPath);
- }
- closedir(dir);
- } else {
- ALOGW("Unable to open filename: %s", multifileDirName.c_str());
- return 0;
- }
-
- return size;
-}
-
-// When removing files, what fraction of the overall limit should be reached when removing files
-// A divisor of two will decrease the cache to 50%, four to 25% and so on
-constexpr uint32_t kCacheLimitDivisor = 2;
-
-// Calculate the cache size and remove old entries until under the limit
-void checkMultifileCacheSize(size_t cacheByteLimit) {
- // Start with the value provided by egl_cache
- size_t limit = cacheByteLimit;
-
- // Check for a debug value
- int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.bytelimit", -1);
- if (debugCacheSize >= 0) {
- ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.bytelimit", limit,
- debugCacheSize);
- limit = debugCacheSize;
- }
-
- // Tally up the initial amount of cache in use
- size_t size = getMultifileCacheSize();
- ALOGD("Multifile cache dir size: %zu", size);
-
- // If size is larger than the threshold, remove files using LRU
- if (size > limit) {
- ALOGV("Multifile cache size is larger than %zu, removing old entries", cacheByteLimit);
- if (!applyLRU(limit / kCacheLimitDivisor)) {
- ALOGE("Error when clearing multifile shader cache");
- return;
- }
- }
- ALOGD("Multifile cache size after reduction: %zu", getMultifileCacheSize());
-}
-
-}; // namespace android
\ No newline at end of file
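
The deleted applyLRU() above trimmed the flat cache directory by sorting entries on their stat mtime and removing the oldest files until the total dropped below a fraction of the limit; its replacement, MultifileBlobCache, keeps this bookkeeping in memory instead. A standalone sketch of the same mtime-ordered trim using only POSIX calls and the standard library; names are illustrative:

#include <dirent.h>
#include <sys/stat.h>

#include <cstdio>
#include <ctime>
#include <map>
#include <string>
#include <utility>

// Remove the oldest files (by mtime) from a flat cache directory until its
// total size is at or below byteLimit, mirroring the deleted applyLRU() above.
static void trimCacheDir(const std::string& dir, size_t byteLimit) {
    // mtime -> (path, size); the multimap keeps entries sorted oldest-first
    std::multimap<time_t, std::pair<std::string, size_t>> byAge;
    size_t total = 0;

    DIR* d = opendir(dir.c_str());
    if (d == nullptr) return;
    struct dirent* entry = nullptr;
    while ((entry = readdir(d)) != nullptr) {
        const std::string name = entry->d_name;
        if (name == "." || name == "..") continue;
        const std::string path = dir + "/" + name;
        struct stat st;
        if (stat(path.c_str(), &st) != 0) continue;
        byAge.emplace(st.st_mtime, std::make_pair(path, static_cast<size_t>(st.st_size)));
        total += static_cast<size_t>(st.st_size);
    }
    closedir(d);

    for (const auto& ageAndEntry : byAge) {
        if (total <= byteLimit) break;
        const auto& [path, size] = ageAndEntry.second;
        if (std::remove(path.c_str()) == 0) {
            total -= size;
        }
    }
}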
diff --git a/opengl/libs/EGL/egl_cache_multifile.h b/opengl/libs/EGL/egl_cache_multifile.h
deleted file mode 100644
index ee5fe81..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_EGL_CACHE_MULTIFILE_H
-#define ANDROID_EGL_CACHE_MULTIFILE_H
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-
-#include <string>
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir);
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir);
-size_t getMultifileCacheSize();
-void checkMultifileCacheSize(size_t cacheByteLimit);
-
-}; // namespace android
-
-#endif // ANDROID_EGL_CACHE_MULTIFILE_H
diff --git a/opengl/tests/EGLTest/egl_cache_test.cpp b/opengl/tests/EGLTest/egl_cache_test.cpp
index 265bec4..2b3e3a4 100644
--- a/opengl/tests/EGLTest/egl_cache_test.cpp
+++ b/opengl/tests/EGLTest/egl_cache_test.cpp
@@ -24,7 +24,7 @@
#include <android-base/test_utils.h>
#include "egl_cache.h"
-#include "egl_cache_multifile.h"
+#include "MultifileBlobCache.h"
#include "egl_display.h"
#include <memory>
@@ -33,12 +33,16 @@
namespace android {
-class EGLCacheTest : public ::testing::Test {
+class EGLCacheTest : public ::testing::TestWithParam<egl_cache_t::EGLCacheMode> {
protected:
virtual void SetUp() {
- mCache = egl_cache_t::get();
+ // Terminate to clean up any previous cache in this process
+ mCache->terminate();
+
mTempFile.reset(new TemporaryFile());
mCache->setCacheFilename(&mTempFile->path[0]);
+ mCache->setCacheLimit(1024);
+ mCache->setCacheMode(mCacheMode);
}
virtual void TearDown() {
@@ -49,11 +53,12 @@
std::string getCachefileName();
- egl_cache_t* mCache;
+ egl_cache_t* mCache = egl_cache_t::get();
std::unique_ptr<TemporaryFile> mTempFile;
+ egl_cache_t::EGLCacheMode mCacheMode = GetParam();
};
-TEST_F(EGLCacheTest, UninitializedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, UninitializedCacheAlwaysMisses) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->setBlob("abcd", 4, "efgh", 4);
ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf, 4));
@@ -63,7 +68,7 @@
ASSERT_EQ(0xee, buf[3]);
}
-TEST_F(EGLCacheTest, InitializedCacheAlwaysHits) {
+TEST_P(EGLCacheTest, InitializedCacheAlwaysHits) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -74,7 +79,7 @@
ASSERT_EQ('h', buf[3]);
}
-TEST_F(EGLCacheTest, TerminatedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, TerminatedCacheAlwaysMisses) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -86,7 +91,7 @@
ASSERT_EQ(0xee, buf[3]);
}
-TEST_F(EGLCacheTest, ReinitializedCacheContainsValues) {
+TEST_P(EGLCacheTest, ReinitializedCacheContainsValues) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -101,12 +106,12 @@
std::string EGLCacheTest::getCachefileName() {
// Return the monolithic filename unless we find the multifile dir
- std::string cachefileName = &mTempFile->path[0];
- std::string multifileDirName = cachefileName + ".multifile";
+ std::string cachePath = &mTempFile->path[0];
+ std::string multifileDirName = cachePath + ".multifile";
+ std::string cachefileName = "";
struct stat info;
if (stat(multifileDirName.c_str(), &info) == 0) {
-
// Ensure we only have one file to manage
int realFileCount = 0;
@@ -121,6 +126,9 @@
cachefileName = multifileDirName + "/" + entry->d_name;
realFileCount++;
}
+ } else {
+ printf("Unable to open %s, error: %s\n",
+ multifileDirName.c_str(), std::strerror(errno));
}
if (realFileCount != 1) {
@@ -128,14 +136,19 @@
// violates test assumptions
cachefileName = "";
}
+ } else {
+ printf("Unable to stat %s, error: %s\n",
+ multifileDirName.c_str(), std::strerror(errno));
}
return cachefileName;
}
-TEST_F(EGLCacheTest, ModifiedCacheMisses) {
- // Turn this back on if multifile becomes the default
- GTEST_SKIP() << "Skipping test designed for multifile, see b/263574392 and b/246966894";
+TEST_P(EGLCacheTest, ModifiedCacheMisses) {
+ // Skip if not in multifile mode
+ if (mCacheMode == egl_cache_t::EGLCacheMode::Monolithic) {
+ GTEST_SKIP() << "Skipping test designed for multifile";
+ }
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -147,13 +160,13 @@
ASSERT_EQ('g', buf[2]);
ASSERT_EQ('h', buf[3]);
+ // Ensure the cache file is written to disk
+ mCache->terminate();
+
// Depending on the cache mode, the file will be in different locations
std::string cachefileName = getCachefileName();
ASSERT_TRUE(cachefileName.length() > 0);
- // Ensure the cache file is written to disk
- mCache->terminate();
-
// Stomp on the beginning of the cache file, breaking the key match
const long stomp = 0xbadf00d;
FILE *file = fopen(cachefileName.c_str(), "w");
@@ -164,14 +177,15 @@
// Ensure no cache hit
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
uint8_t buf2[4] = { 0xee, 0xee, 0xee, 0xee };
- ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf2, 4));
+ // getBlob may return junk for required size, but should not return a cache hit
+ mCache->getBlob("abcd", 4, buf2, 4);
ASSERT_EQ(0xee, buf2[0]);
ASSERT_EQ(0xee, buf2[1]);
ASSERT_EQ(0xee, buf2[2]);
ASSERT_EQ(0xee, buf2[3]);
}
-TEST_F(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
+TEST_P(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -204,4 +218,8 @@
ASSERT_LE(mCache->getCacheSize(), 4);
}
+INSTANTIATE_TEST_CASE_P(MonolithicCacheTests,
+ EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Monolithic));
+INSTANTIATE_TEST_CASE_P(MultifileCacheTests,
+ EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Multifile));
}
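
The test changes above convert the fixture from TEST_F to a value-parameterized TEST_P suite so every case runs once per EGLCacheMode, with GetParam() already valid when the fixture members are initialized. A minimal sketch of the same gtest pattern with a throwaway Mode enum; note that current gtest spells the instantiation macro INSTANTIATE_TEST_SUITE_P, while the hunk uses the older INSTANTIATE_TEST_CASE_P alias:

#include <gtest/gtest.h>

enum class Mode { Monolithic, Multifile };

class ModeTest : public ::testing::TestWithParam<Mode> {
protected:
    // GetParam() is already valid here, exactly as in EGLCacheTest above.
    Mode mMode = GetParam();
};

TEST_P(ModeTest, RunsOncePerMode) {
    if (GetParam() == Mode::Monolithic) {
        GTEST_SKIP() << "example of skipping one parameterization";
    }
    EXPECT_EQ(mMode, Mode::Multifile);
}

INSTANTIATE_TEST_SUITE_P(AllModes, ModeTest,
                         ::testing::Values(Mode::Monolithic, Mode::Multifile));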
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index 079b80d..d14a781 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -4185,6 +4185,17 @@
bool needWake = false;
{ // acquire lock
mLock.lock();
+ if (!(policyFlags & POLICY_FLAG_PASS_TO_USER)) {
+ // Set the flag anyway if we already have an ongoing gesture. That would allow us to
+ // complete the processing of the current stroke.
+ const auto touchStateIt = mTouchStatesByDisplay.find(args->displayId);
+ if (touchStateIt != mTouchStatesByDisplay.end()) {
+ const TouchState& touchState = touchStateIt->second;
+ if (touchState.deviceId == args->deviceId && touchState.isDown()) {
+ policyFlags |= POLICY_FLAG_PASS_TO_USER;
+ }
+ }
+ }
if (shouldSendMotionToInputFilterLocked(args)) {
ui::Transform displayTransform;
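
The InputDispatcher change above re-adds POLICY_FLAG_PASS_TO_USER when an event arriving without the flag continues a gesture that is already being delivered on the same display and device, so the app still receives the closing CANCEL. A small sketch of that decision with hypothetical stand-in types; the flag value and the map shape are assumptions for illustration:

#include <cstdint>
#include <map>

// Hypothetical stand-ins for the dispatcher state touched in the hunk above.
struct TouchState {
    int32_t deviceId = -1;
    bool down = false;
    bool isDown() const { return down; }
};

// Real value lives in the input headers; assumed here for illustration only.
constexpr uint32_t POLICY_FLAG_PASS_TO_USER = 0x40000000;

// If the event is not marked for the user but continues a gesture we are already
// delivering on that display and device, keep delivering it so the stream stays
// consistent (the app still sees the CANCEL that ends the gesture).
uint32_t keepOngoingGestureConsistent(uint32_t policyFlags, int32_t displayId, int32_t deviceId,
                                      const std::map<int32_t, TouchState>& touchStatesByDisplay) {
    if (policyFlags & POLICY_FLAG_PASS_TO_USER) return policyFlags;
    const auto it = touchStatesByDisplay.find(displayId);
    if (it != touchStatesByDisplay.end() && it->second.deviceId == deviceId &&
        it->second.isDown()) {
        policyFlags |= POLICY_FLAG_PASS_TO_USER;
    }
    return policyFlags;
}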
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index a2fe911..3605be2 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -1567,6 +1567,113 @@
std::vector<PointerBuilder> mPointers;
};
+class MotionArgsBuilder {
+public:
+ MotionArgsBuilder(int32_t action, int32_t source) {
+ mAction = action;
+ mSource = source;
+ mEventTime = systemTime(SYSTEM_TIME_MONOTONIC);
+ mDownTime = mEventTime;
+ }
+
+ MotionArgsBuilder& deviceId(int32_t deviceId) {
+ mDeviceId = deviceId;
+ return *this;
+ }
+
+ MotionArgsBuilder& downTime(nsecs_t downTime) {
+ mDownTime = downTime;
+ return *this;
+ }
+
+ MotionArgsBuilder& eventTime(nsecs_t eventTime) {
+ mEventTime = eventTime;
+ return *this;
+ }
+
+ MotionArgsBuilder& displayId(int32_t displayId) {
+ mDisplayId = displayId;
+ return *this;
+ }
+
+ MotionArgsBuilder& policyFlags(int32_t policyFlags) {
+ mPolicyFlags = policyFlags;
+ return *this;
+ }
+
+ MotionArgsBuilder& actionButton(int32_t actionButton) {
+ mActionButton = actionButton;
+ return *this;
+ }
+
+ MotionArgsBuilder& buttonState(int32_t buttonState) {
+ mButtonState = buttonState;
+ return *this;
+ }
+
+ MotionArgsBuilder& rawXCursorPosition(float rawXCursorPosition) {
+ mRawXCursorPosition = rawXCursorPosition;
+ return *this;
+ }
+
+ MotionArgsBuilder& rawYCursorPosition(float rawYCursorPosition) {
+ mRawYCursorPosition = rawYCursorPosition;
+ return *this;
+ }
+
+ MotionArgsBuilder& pointer(PointerBuilder pointer) {
+ mPointers.push_back(pointer);
+ return *this;
+ }
+
+ MotionArgsBuilder& addFlag(uint32_t flags) {
+ mFlags |= flags;
+ return *this;
+ }
+
+ NotifyMotionArgs build() {
+ std::vector<PointerProperties> pointerProperties;
+ std::vector<PointerCoords> pointerCoords;
+ for (const PointerBuilder& pointer : mPointers) {
+ pointerProperties.push_back(pointer.buildProperties());
+ pointerCoords.push_back(pointer.buildCoords());
+ }
+
+ // Set mouse cursor position for the most common cases to avoid boilerplate.
+ if (mSource == AINPUT_SOURCE_MOUSE &&
+ !MotionEvent::isValidCursorPosition(mRawXCursorPosition, mRawYCursorPosition) &&
+ mPointers.size() == 1) {
+ mRawXCursorPosition = pointerCoords[0].getX();
+ mRawYCursorPosition = pointerCoords[0].getY();
+ }
+
+ NotifyMotionArgs args(InputEvent::nextId(), mEventTime, /*readTime=*/mEventTime, mDeviceId,
+ mSource, mDisplayId, mPolicyFlags, mAction, mActionButton, mFlags,
+ AMETA_NONE, mButtonState, MotionClassification::NONE, /*edgeFlags=*/0,
+ mPointers.size(), pointerProperties.data(), pointerCoords.data(),
+ /*xPrecision=*/0, /*yPrecision=*/0, mRawXCursorPosition,
+ mRawYCursorPosition, mDownTime, /*videoFrames=*/{});
+
+ return args;
+ }
+
+private:
+ int32_t mAction;
+ int32_t mDeviceId = DEVICE_ID;
+ int32_t mSource;
+ nsecs_t mDownTime;
+ nsecs_t mEventTime;
+ int32_t mDisplayId{ADISPLAY_ID_DEFAULT};
+ int32_t mPolicyFlags = DEFAULT_POLICY_FLAGS;
+ int32_t mActionButton{0};
+ int32_t mButtonState{0};
+ int32_t mFlags{0};
+ float mRawXCursorPosition{AMOTION_EVENT_INVALID_CURSOR_POSITION};
+ float mRawYCursorPosition{AMOTION_EVENT_INVALID_CURSOR_POSITION};
+
+ std::vector<PointerBuilder> mPointers;
+};
+
static InputEventInjectionResult injectMotionEvent(
const std::unique_ptr<InputDispatcher>& dispatcher, const MotionEvent& event,
std::chrono::milliseconds injectionTimeout = INJECT_EVENT_TIMEOUT,
@@ -2055,6 +2162,92 @@
}
/**
+ * The policy typically sets POLICY_FLAG_PASS_TO_USER on the events, but when the display is not
+ * interactive, it might stop sending this flag.
+ * In this test, we check that if the policy stops sending this flag mid-gesture, the dispatcher
+ * still keeps the input stream consistent.
+ *
+ * Test procedure:
+ * DOWN -> POINTER_DOWN -> (stop sending POLICY_FLAG_PASS_TO_USER) -> CANCEL.
+ * DOWN (new gesture).
+ *
+ * In a faulty implementation, the CANCEL event could be dropped, leaving the dispatcher in an
+ * inconsistent state. This would cause the final DOWN event to not be delivered to the app.
+ *
+ * We technically just need a single window here, but we are using two windows (spy on top and a
+ * regular window below) to emulate the actual situation where it happens on the device.
+ */
+TEST_F(InputDispatcherTest, TwoPointerCancelInconsistentPolicy) {
+ std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+ sp<FakeWindowHandle> spyWindow =
+ sp<FakeWindowHandle>::make(application, mDispatcher, "Spy", ADISPLAY_ID_DEFAULT);
+ spyWindow->setFrame(Rect(0, 0, 200, 200));
+ spyWindow->setTrustedOverlay(true);
+ spyWindow->setSpy(true);
+
+ sp<FakeWindowHandle> window =
+ sp<FakeWindowHandle>::make(application, mDispatcher, "Window", ADISPLAY_ID_DEFAULT);
+ window->setFrame(Rect(0, 0, 200, 200));
+
+ mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {spyWindow, window}}});
+ const int32_t touchDeviceId = 4;
+ NotifyMotionArgs args;
+
+ // Two pointers down
+ mDispatcher->notifyMotion(&(
+ args = MotionArgsBuilder(AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .policyFlags(DEFAULT_POLICY_FLAGS)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+ .build()));
+
+ mDispatcher->notifyMotion(&(
+ args = MotionArgsBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .policyFlags(DEFAULT_POLICY_FLAGS)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+ .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER).x(120).y(120))
+ .build()));
+ spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+ spyWindow->consumeMotionEvent(WithMotionAction(POINTER_1_DOWN));
+ window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+ window->consumeMotionEvent(WithMotionAction(POINTER_1_DOWN));
+
+ // Cancel the current gesture. Send the cancel without the default policy flags.
+ mDispatcher->notifyMotion(&(
+ args = MotionArgsBuilder(AMOTION_EVENT_ACTION_CANCEL, AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .policyFlags(0)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+ .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER).x(120).y(120))
+ .build()));
+ spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_CANCEL));
+ window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_CANCEL));
+
+ // We don't need to reset the device to reproduce the issue, but the reset event typically
+ // follows, so we keep it here to model the actual listener behaviour more closely.
+ NotifyDeviceResetArgs resetArgs;
+ resetArgs.id = 1; // arbitrary id
+ resetArgs.eventTime = systemTime(SYSTEM_TIME_MONOTONIC);
+ resetArgs.deviceId = touchDeviceId;
+ mDispatcher->notifyDeviceReset(&resetArgs);
+
+ // Start new gesture
+ mDispatcher->notifyMotion(&(
+ args = MotionArgsBuilder(AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+ .deviceId(touchDeviceId)
+ .policyFlags(DEFAULT_POLICY_FLAGS)
+ .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+ .build()));
+ spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+ window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+
+ // No more events
+ spyWindow->assertNoEvents();
+ window->assertNoEvents();
+}
+
+/**
* Two windows: a window on the left and a window on the right.
* Mouse is hovered from the right window into the left window.
* Next, we tap on the left window, where the cursor was last seen.
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
index 3ed24b2..6490476 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
@@ -391,9 +391,7 @@
void LayerSnapshotBuilder::updateSnapshots(const Args& args) {
ATRACE_NAME("UpdateSnapshots");
- if (args.parentCrop) {
- mRootSnapshot.geomLayerBounds = *args.parentCrop;
- } else if (args.forceUpdate || args.displayChanges) {
+ if (args.forceUpdate || args.displayChanges) {
mRootSnapshot.geomLayerBounds = getMaxDisplayBounds(args.displays);
}
if (args.displayChanges) {
@@ -620,8 +618,7 @@
RequestedLayerState::Changes::AffectsChildren);
snapshot.changes = parentChanges | requested.changes;
snapshot.isHiddenByPolicyFromParent = parentSnapshot.isHiddenByPolicyFromParent ||
- parentSnapshot.invalidTransform || requested.isHiddenByPolicy() ||
- (args.excludeLayerIds.find(path.id) != args.excludeLayerIds.end());
+ parentSnapshot.invalidTransform || requested.isHiddenByPolicy();
snapshot.contentDirty = requested.what & layer_state_t::CONTENT_DIRTY;
// TODO(b/238781169) scope down the changes to only buffer updates.
snapshot.hasReadyFrame =
@@ -986,20 +983,6 @@
}
}
-// Visit each visible snapshot in z-order
-void LayerSnapshotBuilder::forEachVisibleSnapshot(const ConstVisitor& visitor,
- const LayerHierarchy& root) const {
- root.traverseInZOrder(
- [this, visitor](const LayerHierarchy&,
- const LayerHierarchy::TraversalPath& traversalPath) -> bool {
- LayerSnapshot* snapshot = getSnapshot(traversalPath);
- if (snapshot && snapshot->isVisible) {
- visitor(*snapshot);
- }
- return true;
- });
-}
-
void LayerSnapshotBuilder::forEachVisibleSnapshot(const Visitor& visitor) {
for (int i = 0; i < mNumInterestingSnapshots; i++) {
std::unique_ptr<LayerSnapshot>& snapshot = mSnapshots.at((size_t)i);
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
index f4544fd..abb7e66 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
@@ -36,7 +36,7 @@
class LayerSnapshotBuilder {
public:
struct Args {
- LayerHierarchy root;
+ const LayerHierarchy& root;
const LayerLifecycleManager& layerLifecycleManager;
bool forceUpdate = false;
bool includeMetadata = false;
@@ -46,8 +46,6 @@
const renderengine::ShadowSettings& globalShadowSettings;
bool supportsBlur = true;
bool forceFullDamage = false;
- std::optional<FloatRect> parentCrop = std::nullopt;
- std::unordered_set<uint32_t> excludeLayerIds;
};
LayerSnapshotBuilder();
@@ -67,9 +65,6 @@
// Visit each visible snapshot in z-order
void forEachVisibleSnapshot(const ConstVisitor& visitor) const;
- // Visit each visible snapshot in z-order
- void forEachVisibleSnapshot(const ConstVisitor& visitor, const LayerHierarchy& root) const;
-
typedef std::function<void(std::unique_ptr<LayerSnapshot>& snapshot)> Visitor;
// Visit each visible snapshot in z-order and move the snapshot if needed
void forEachVisibleSnapshot(const Visitor& visitor);
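
Args.root above switches from holding a LayerHierarchy by value to a const reference, so each snapshot update no longer copies the hierarchy; the trade-off is that the referenced hierarchy must outlive the Args instance. A tiny hypothetical sketch of the same shape, using the designated-initializer style the call sites in this patch use:

#include <string>

// Hypothetical mirror of the Args change above. Holding the hierarchy by const
// reference avoids copying it on every update, but the referenced object must
// outlive the Args instance.
struct Hierarchy {
    std::string name;
};

struct Args {
    const Hierarchy& root;  // was a by-value copy before this change
    bool forceUpdate = false;
};

static void update(const Args& args) {
    (void)args;  // placeholder consumer for the sketch
}

void runUpdate(const Hierarchy& owned) {
    // Designated initializers, matching the builder call sites elsewhere in this patch.
    update(Args{.root = owned, .forceUpdate = true});
}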
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 8c484f0..d1b5fcc 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -146,7 +146,7 @@
mLayerCreationFlags(args.flags),
mBorderEnabled(false),
mTextureName(args.textureName),
- mLegacyLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
+ mLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
ALOGV("Creating Layer %s", getDebugName());
uint32_t layerFlags = 0;
@@ -3119,14 +3119,15 @@
return true;
}
-bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles,
- bool willPresent) {
+bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles) {
// If there is no handle, we will not send a callback so reset mReleasePreviousBuffer and return
if (handles.empty()) {
mReleasePreviousBuffer = false;
return false;
}
+ const bool willPresent = willPresentCurrentTransaction();
+
std::deque<sp<CallbackHandle>> remainingHandles;
for (const auto& handle : handles) {
// If this transaction set a buffer on this layer, release its previous buffer
@@ -3209,10 +3210,11 @@
return fenceSignaled;
}
-void Layer::onPreComposition(nsecs_t refreshStartTime) {
+bool Layer::onPreComposition(nsecs_t refreshStartTime) {
for (const auto& handle : mDrawingState.callbackHandles) {
handle->refreshStartTime = refreshStartTime;
}
+ return hasReadyFrame();
}
void Layer::setAutoRefresh(bool autoRefresh) {
@@ -3598,7 +3600,7 @@
sp<LayerFE> Layer::getCompositionEngineLayerFE() const {
// There's no need to get a CE Layer if the layer isn't going to draw anything.
- return hasSomethingToDraw() ? mLegacyLayerFE : nullptr;
+ return hasSomethingToDraw() ? mLayerFE : nullptr;
}
const LayerSnapshot* Layer::getLayerSnapshot() const {
@@ -3609,36 +3611,16 @@
return mSnapshot.get();
}
-std::unique_ptr<frontend::LayerSnapshot> Layer::stealLayerSnapshot() {
- return std::move(mSnapshot);
-}
-
-void Layer::updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot) {
- mSnapshot = std::move(snapshot);
-}
-
const compositionengine::LayerFECompositionState* Layer::getCompositionState() const {
return mSnapshot.get();
}
sp<LayerFE> Layer::copyCompositionEngineLayerFE() const {
- auto result = mFlinger->getFactory().createLayerFE(mName);
+ auto result = mFlinger->getFactory().createLayerFE(mLayerFE->getDebugName());
result->mSnapshot = std::make_unique<LayerSnapshot>(*mSnapshot);
return result;
}
-sp<LayerFE> Layer::getCompositionEngineLayerFE(
- const frontend::LayerHierarchy::TraversalPath& path) {
- for (auto& [p, layerFE] : mLayerFEs) {
- if (p == path) {
- return layerFE;
- }
- }
- auto layerFE = mFlinger->getFactory().createLayerFE(mName);
- mLayerFEs.emplace_back(path, layerFE);
- return layerFE;
-}
-
void Layer::useSurfaceDamage() {
if (mFlinger->mForceFullDamage) {
surfaceDamageRegion = Region::INVALID_REGION;
@@ -4034,6 +4016,28 @@
}
}
+LayerSnapshotGuard::LayerSnapshotGuard(Layer* layer) : mLayer(layer) {
+ if (mLayer) {
+ mLayer->mLayerFE->mSnapshot = std::move(mLayer->mSnapshot);
+ }
+}
+
+LayerSnapshotGuard::~LayerSnapshotGuard() {
+ if (mLayer) {
+ mLayer->mSnapshot = std::move(mLayer->mLayerFE->mSnapshot);
+ }
+}
+
+LayerSnapshotGuard::LayerSnapshotGuard(LayerSnapshotGuard&& other) : mLayer(other.mLayer) {
+ other.mLayer = nullptr;
+}
+
+LayerSnapshotGuard& LayerSnapshotGuard::operator=(LayerSnapshotGuard&& other) {
+ mLayer = other.mLayer;
+ other.mLayer = nullptr;
+ return *this;
+}
+
void Layer::setTrustedPresentationInfo(TrustedPresentationThresholds const& thresholds,
TrustedPresentationListener const& listener) {
bool hadTrustedPresentationListener = hasTrustedPresentationListener();
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index bf8cc7e..e2bae23 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -309,8 +309,7 @@
bool setSurfaceDamageRegion(const Region& /*surfaceDamage*/);
bool setApi(int32_t /*api*/);
bool setSidebandStream(const sp<NativeHandle>& /*sidebandStream*/);
- bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/,
- bool willPresent);
+ bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/);
virtual bool setBackgroundColor(const half3& color, float alpha, ui::Dataspace dataspace);
virtual bool setColorSpaceAgnostic(const bool agnostic);
virtual bool setDimmingEnabled(const bool dimmingEnabled);
@@ -331,12 +330,9 @@
virtual sp<LayerFE> getCompositionEngineLayerFE() const;
virtual sp<LayerFE> copyCompositionEngineLayerFE() const;
- sp<LayerFE> getCompositionEngineLayerFE(const frontend::LayerHierarchy::TraversalPath&);
const frontend::LayerSnapshot* getLayerSnapshot() const;
frontend::LayerSnapshot* editLayerSnapshot();
- std::unique_ptr<frontend::LayerSnapshot> stealLayerSnapshot();
- void updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot);
// If we have received a new buffer this frame, we will pass its surface
// damage down to hardware composer. Otherwise, we must send a region with
@@ -518,7 +514,7 @@
// implements compositionengine::LayerFE
const compositionengine::LayerFECompositionState* getCompositionState() const;
bool fenceHasSignaled() const;
- void onPreComposition(nsecs_t refreshStartTime);
+ bool onPreComposition(nsecs_t refreshStartTime);
void onLayerDisplayed(ftl::SharedFuture<FenceResult>);
void setWasClientComposed(const sp<Fence>& fence) {
@@ -838,7 +834,6 @@
void updateMetadataSnapshot(const LayerMetadata& parentMetadata);
void updateRelativeMetadataSnapshot(const LayerMetadata& relativeLayerMetadata,
std::unordered_set<Layer*>& visited);
- bool willPresentCurrentTransaction() const;
void callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
const sp<GraphicBuffer>& buffer, uint64_t framenumber,
@@ -1048,6 +1043,8 @@
// Crop that applies to the buffer
Rect computeBufferCrop(const State& s);
+ bool willPresentCurrentTransaction() const;
+
void callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
const sp<GraphicBuffer>& buffer, uint64_t framenumber,
const sp<Fence>& releaseFence,
@@ -1164,10 +1161,34 @@
// not specify a destination frame.
ui::Transform mRequestedTransform;
- sp<LayerFE> mLegacyLayerFE;
- std::vector<std::pair<frontend::LayerHierarchy::TraversalPath, sp<LayerFE>>> mLayerFEs;
+ sp<LayerFE> mLayerFE;
std::unique_ptr<frontend::LayerSnapshot> mSnapshot =
std::make_unique<frontend::LayerSnapshot>();
+
+ friend class LayerSnapshotGuard;
+};
+
+// LayerSnapshotGuard manages the movement of LayerSnapshot between a Layer and its corresponding
+// LayerFE. This class must be used whenever LayerFEs are passed to CompositionEngine. Instances of
+// LayerSnapshotGuard should only be constructed on the main thread and should not be moved outside
+// the main thread.
+//
+// Moving the snapshot instead of sharing common state prevents use of LayerFE outside the main
+// thread by making errors obvious (i.e. use outside the main thread results in SEGFAULTs due to
+// nullptr dereference).
+class LayerSnapshotGuard {
+public:
+ LayerSnapshotGuard(Layer* layer) REQUIRES(kMainThreadContext);
+ ~LayerSnapshotGuard() REQUIRES(kMainThreadContext);
+
+ LayerSnapshotGuard(const LayerSnapshotGuard&) = delete;
+ LayerSnapshotGuard& operator=(const LayerSnapshotGuard&) = delete;
+
+ LayerSnapshotGuard(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
+ LayerSnapshotGuard& operator=(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
+
+private:
+ Layer* mLayer;
};
std::ostream& operator<<(std::ostream& stream, const Layer::FrameRate& rate);
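
LayerSnapshotGuard above is a move-only RAII type: its constructor moves the snapshot from the Layer into its LayerFE for the duration of composition, and the destructor moves it back, so any LayerFE use outside that window dereferences a null snapshot and fails loudly. A generic sketch of the same ownership-swap guard with hypothetical Owner/Borrower types:

#include <memory>
#include <utility>

// Hypothetical stand-ins: Owner normally holds the state; Borrower may use it
// only while a guard is alive (mirrors Layer and LayerFE in the hunk above).
struct State {
    int value = 0;
};
struct Borrower {
    std::unique_ptr<State> snapshot;
};
struct Owner {
    std::unique_ptr<State> snapshot = std::make_unique<State>();
    Borrower fe;
};

class SnapshotGuard {
public:
    explicit SnapshotGuard(Owner* owner) : mOwner(owner) {
        if (mOwner) mOwner->fe.snapshot = std::move(mOwner->snapshot);
    }
    ~SnapshotGuard() {
        if (mOwner) mOwner->snapshot = std::move(mOwner->fe.snapshot);
    }

    SnapshotGuard(const SnapshotGuard&) = delete;
    SnapshotGuard& operator=(const SnapshotGuard&) = delete;

    SnapshotGuard(SnapshotGuard&& other) noexcept : mOwner(other.mOwner) { other.mOwner = nullptr; }
    SnapshotGuard& operator=(SnapshotGuard&& other) noexcept {
        mOwner = other.mOwner;
        other.mOwner = nullptr;
        return *this;
    }

private:
    Owner* mOwner;
};

A scope-local std::vector of guards around the composition call, as in the SurfaceFlinger changes further down, lends every snapshot out for exactly the duration of present() and hands them all back when the vector is destroyed.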
diff --git a/services/surfaceflinger/LayerRenderArea.cpp b/services/surfaceflinger/LayerRenderArea.cpp
index 03a7f22..2b4375b 100644
--- a/services/surfaceflinger/LayerRenderArea.cpp
+++ b/services/surfaceflinger/LayerRenderArea.cpp
@@ -69,14 +69,6 @@
void LayerRenderArea::render(std::function<void()> drawLayers) {
using namespace std::string_literals;
- if (!mChildrenOnly) {
- mTransform = mLayer->getTransform().inverse();
- }
-
- if (mFlinger.mLayerLifecycleManagerEnabled) {
- drawLayers();
- return;
- }
// If layer is offscreen, update mirroring info if it exists
if (mLayer->isRemovedFromCurrentState()) {
mLayer->traverse(LayerVector::StateSet::Drawing,
@@ -86,6 +78,7 @@
}
if (!mChildrenOnly) {
+ mTransform = mLayer->getTransform().inverse();
// If the layer is offscreen, compute bounds since we don't compute bounds for offscreen
// layers in a regular cycles.
if (mLayer->isRemovedFromCurrentState()) {
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index e74656e..c7c91ce 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -173,8 +173,6 @@
using namespace hardware::configstore;
using namespace hardware::configstore::V1_0;
using namespace sysprop;
-using ftl::Flags;
-using namespace ftl::flag_operators;
using aidl::android::hardware::graphics::common::DisplayDecorationSupport;
using aidl::android::hardware::graphics::composer3::Capability;
@@ -472,10 +470,6 @@
mPowerHintSessionMode =
{.late = base::GetBoolProperty("debug.sf.send_late_power_session_hint"s, true),
.early = base::GetBoolProperty("debug.sf.send_early_power_session_hint"s, false)};
- mLayerLifecycleManagerEnabled =
- base::GetBoolProperty("debug.sf.enable_layer_lifecycle_manager"s, false);
- mLegacyFrontEndEnabled = !mLayerLifecycleManagerEnabled ||
- base::GetBoolProperty("debug.sf.enable_legacy_frontend"s, true);
}
LatchUnsignaledConfig SurfaceFlinger::getLatchUnsignaledConfig() {
@@ -2133,110 +2127,6 @@
}
}
-bool SurfaceFlinger::updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
- bool transactionsFlushed,
- bool& outTransactionsAreEmpty) {
- bool needsTraversal = false;
- if (transactionsFlushed) {
- needsTraversal |= commitMirrorDisplays(vsyncId);
- needsTraversal |= commitCreatedLayers(vsyncId, update.layerCreatedStates);
- needsTraversal |= applyTransactions(update.transactions, vsyncId);
- }
- outTransactionsAreEmpty = !needsTraversal;
- const bool shouldCommit = (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
- if (shouldCommit) {
- commitTransactions();
- }
-
- bool mustComposite = latchBuffers() || shouldCommit;
- updateLayerGeometry();
- return mustComposite;
-}
-
-bool SurfaceFlinger::updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update,
- bool transactionsFlushed, bool& outTransactionsAreEmpty) {
- using Changes = frontend::RequestedLayerState::Changes;
- ATRACE_NAME("updateLayerSnapshots");
- {
- mLayerLifecycleManager.addLayers(std::move(update.newLayers));
- mLayerLifecycleManager.applyTransactions(update.transactions);
- mLayerLifecycleManager.onHandlesDestroyed(update.destroyedHandles);
- for (auto& legacyLayer : update.layerCreatedStates) {
- sp<Layer> layer = legacyLayer.layer.promote();
- if (layer) {
- mLegacyLayers[layer->sequence] = layer;
- }
- }
- }
- if (mLayerLifecycleManager.getGlobalChanges().test(Changes::Hierarchy)) {
- ATRACE_NAME("LayerHierarchyBuilder:update");
- mLayerHierarchyBuilder.update(mLayerLifecycleManager.getLayers(),
- mLayerLifecycleManager.getDestroyedLayers());
- }
-
- applyAndCommitDisplayTransactionStates(update.transactions);
-
- {
- ATRACE_NAME("LayerSnapshotBuilder:update");
- frontend::LayerSnapshotBuilder::Args args{.root = mLayerHierarchyBuilder.getHierarchy(),
- .layerLifecycleManager = mLayerLifecycleManager,
- .displays = mFrontEndDisplayInfos,
- .displayChanges = mFrontEndDisplayInfosChanged,
- .globalShadowSettings =
- mDrawingState.globalShadowSettings,
- .supportsBlur = mSupportsBlur,
- .forceFullDamage = mForceFullDamage};
- mLayerSnapshotBuilder.update(args);
- }
-
- if (mLayerLifecycleManager.getGlobalChanges().any(Changes::Geometry | Changes::Input |
- Changes::Hierarchy)) {
- mUpdateInputInfo = true;
- }
- if (mLayerLifecycleManager.getGlobalChanges().any(Changes::VisibleRegion | Changes::Hierarchy |
- Changes::Visibility)) {
- mVisibleRegionsDirty = true;
- }
- outTransactionsAreEmpty = mLayerLifecycleManager.getGlobalChanges().get() == 0;
- const bool mustComposite = mLayerLifecycleManager.getGlobalChanges().get() != 0;
- {
- ATRACE_NAME("LLM:commitChanges");
- mLayerLifecycleManager.commitChanges();
- }
-
- if (!mLegacyFrontEndEnabled) {
- ATRACE_NAME("DisplayCallbackAndStatsUpdates");
- applyTransactions(update.transactions, vsyncId);
-
- bool newDataLatched = false;
- for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
- if (!snapshot->changes.test(Changes::Buffer)) continue;
- auto it = mLegacyLayers.find(snapshot->sequence);
- LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldnt find layer object for %s",
- snapshot->getDebugString().c_str());
- mLayersWithQueuedFrames.emplace(it->second);
- newDataLatched = true;
- if (!snapshot->isVisible) break;
-
- Region visibleReg;
- visibleReg.set(snapshot->transformedBoundsWithoutTransparentRegion);
- invalidateLayerStack(snapshot->outputFilter, visibleReg);
- }
-
- for (auto& destroyedLayer : mLayerLifecycleManager.getDestroyedLayers()) {
- mLegacyLayers.erase(destroyedLayer->id);
- }
-
- // enter boot animation on first buffer latch
- if (CC_UNLIKELY(mBootStage == BootStage::BOOTLOADER && newDataLatched)) {
- ALOGI("Enter boot animation");
- mBootStage = BootStage::BOOTANIMATION;
- }
- commitTransactions();
- }
- return mustComposite;
-}
-
bool SurfaceFlinger::commit(TimePoint frameTime, VsyncId vsyncId, TimePoint expectedVsyncTime)
FTL_FAKE_GUARD(kMainThreadContext) {
// The expectedVsyncTime, which was predicted when this frame was scheduled, is normally in the
@@ -2376,34 +2266,45 @@
mFrameTimeline->setSfWakeUp(vsyncId.value, frameTime.ns(),
Fps::fromPeriodNsecs(vsyncPeriod.ns()));
- const bool flushTransactions = clearTransactionFlags(eTransactionFlushNeeded);
- LifecycleUpdate updates;
- if (flushTransactions) {
- updates = flushLifecycleUpdates();
+ bool needsTraversal = false;
+ if (clearTransactionFlags(eTransactionFlushNeeded)) {
+ // Locking:
+ // 1. to prevent onHandleDestroyed from being called while the state lock is held,
+ // we must keep a copy of the transactions (specifically the composer
+ // states) around outside the scope of the lock.
+ // 2. Transactions and created layers do not share a lock. To prevent applying
+ // transactions with layers still in the createdLayer queue, flush the transactions
+ // before committing the created layers.
+ std::vector<TransactionState> transactions = mTransactionHandler.flushTransactions();
+ needsTraversal |= commitMirrorDisplays(vsyncId);
+ needsTraversal |= commitCreatedLayers(vsyncId);
+ needsTraversal |= applyTransactions(transactions, vsyncId);
}
- bool transactionsAreEmpty;
- if (mLegacyFrontEndEnabled) {
- mustComposite |= updateLayerSnapshotsLegacy(vsyncId, updates, flushTransactions,
- transactionsAreEmpty);
- }
- if (mLayerLifecycleManagerEnabled) {
- mustComposite |=
- updateLayerSnapshots(vsyncId, updates, flushTransactions, transactionsAreEmpty);
+
+ const bool shouldCommit =
+ (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
+ if (shouldCommit) {
+ commitTransactions();
}
if (transactionFlushNeeded()) {
setTransactionFlags(eTransactionFlushNeeded);
}
+ mustComposite |= shouldCommit;
+ mustComposite |= latchBuffers();
+
// This has to be called after latchBuffers because we want to include the layers that have
// been latched in the commit callback
- if (transactionsAreEmpty) {
+ if (!needsTraversal) {
// Invoke empty transaction callbacks early.
mTransactionCallbackInvoker.sendCallbacks(false /* onCommitOnly */);
} else {
// Invoke OnCommit callbacks.
mTransactionCallbackInvoker.sendCallbacks(true /* onCommitOnly */);
}
+
+ updateLayerGeometry();
}
// Layers need to get updated (in the previous line) before we can use them for
@@ -2490,6 +2391,15 @@
refreshArgs.updatingOutputGeometryThisFrame = mVisibleRegionsDirty;
refreshArgs.updatingGeometryThisFrame = mGeometryDirty.exchange(false) || mVisibleRegionsDirty;
+ std::vector<Layer*> layers;
+
+ mDrawingState.traverseInZOrder([&refreshArgs, &layers](Layer* layer) {
+ if (auto layerFE = layer->getCompositionEngineLayerFE()) {
+ layer->updateSnapshot(refreshArgs.updatingGeometryThisFrame);
+ refreshArgs.layers.push_back(layerFE);
+ layers.push_back(layer);
+ }
+ });
refreshArgs.internalDisplayRotationFlags = DisplayDevice::getPrimaryDisplayRotationFlags();
if (CC_UNLIKELY(mDrawingState.colorMatrixChanged)) {
@@ -2516,13 +2426,17 @@
// the scheduler.
const auto presentTime = systemTime();
- std::vector<std::pair<Layer*, LayerFE*>> layers =
- moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/false, vsyncId.value);
- mCompositionEngine->present(refreshArgs);
- moveSnapshotsFromCompositionArgs(refreshArgs, layers);
+ {
+ std::vector<LayerSnapshotGuard> layerSnapshotGuards;
+ for (Layer* layer : layers) {
+ layerSnapshotGuards.emplace_back(layer);
+ }
+ mCompositionEngine->present(refreshArgs);
+ }
- for (auto [layer, layerFE] : layers) {
- CompositionResult compositionResult{layerFE->stealCompositionResult()};
+ for (auto& layer : layers) {
+ CompositionResult compositionResult{
+ layer->getCompositionEngineLayerFE()->stealCompositionResult()};
layer->onPreComposition(compositionResult.refreshStartTime);
for (auto releaseFence : compositionResult.releaseFences) {
layer->onLayerDisplayed(releaseFence);
@@ -2616,7 +2530,7 @@
for (auto& layer : mLayersPendingRefresh) {
Region visibleReg;
visibleReg.set(layer->getScreenBounds());
- invalidateLayerStack(layer->getOutputFilter(), visibleReg);
+ invalidateLayerStack(layer, visibleReg);
}
mLayersPendingRefresh.clear();
}
@@ -3473,8 +3387,7 @@
void SurfaceFlinger::commitTransactionsLocked(uint32_t transactionFlags) {
// Commit display transactions.
const bool displayTransactionNeeded = transactionFlags & eDisplayTransactionNeeded;
- mFrontEndDisplayInfosChanged = displayTransactionNeeded;
- if (displayTransactionNeeded && !mLayerLifecycleManagerEnabled) {
+ if (displayTransactionNeeded) {
processDisplayChangesLocked();
mFrontEndDisplayInfos.clear();
for (const auto& [_, display] : mDisplays) {
@@ -3565,7 +3478,7 @@
// this layer is not visible anymore
Region visibleReg;
visibleReg.set(layer->getScreenBounds());
- invalidateLayerStack(layer->getOutputFilter(), visibleReg);
+ invalidateLayerStack(sp<Layer>::fromExisting(layer), visibleReg);
}
});
}
@@ -3653,23 +3566,16 @@
outWindowInfos.reserve(sNumWindowInfos);
sNumWindowInfos = 0;
- if (mLayerLifecycleManagerEnabled) {
- mLayerSnapshotBuilder.forEachInputSnapshot(
- [&outWindowInfos](const frontend::LayerSnapshot& snapshot) {
- outWindowInfos.push_back(snapshot.inputInfo);
- });
- } else {
- mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
- if (!layer->needsInputInfo()) return;
- const auto opt =
- mFrontEndDisplayInfos.get(layer->getLayerStack())
- .transform([](const frontend::DisplayInfo& info) {
- return Layer::InputDisplayArgs{&info.transform, info.isSecure};
- });
+ mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
+ if (!layer->needsInputInfo()) return;
- outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
- });
- }
+ const auto opt = mFrontEndDisplayInfos.get(layer->getLayerStack())
+ .transform([](const frontend::DisplayInfo& info) {
+ return Layer::InputDisplayArgs{&info.transform, info.isSecure};
+ });
+
+ outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
+ });
sNumWindowInfos = outWindowInfos.size();
@@ -3686,9 +3592,17 @@
refreshArgs.outputs.push_back(display->getCompositionDisplay());
}
}
- auto layers = moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/true, 0);
+
+ std::vector<LayerSnapshotGuard> layerSnapshotGuards;
+ mDrawingState.traverse([&layerSnapshotGuards](Layer* layer) {
+ if (layer->getLayerSnapshot()->compositionType ==
+ aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
+ layer->updateSnapshot(false /* updateGeometry */);
+ layerSnapshotGuards.emplace_back(layer);
+ }
+ });
+
mCompositionEngine->updateCursorAsync(refreshArgs);
- moveSnapshotsFromCompositionArgs(refreshArgs, layers);
}
void SurfaceFlinger::requestDisplayModes(std::vector<display::DisplayModeRequest> modeRequests) {
@@ -3864,10 +3778,10 @@
}
}
-void SurfaceFlinger::invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty) {
+void SurfaceFlinger::invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty) {
for (const auto& [token, displayDevice] : FTL_FAKE_GUARD(mStateLock, mDisplays)) {
auto display = displayDevice->getCompositionDisplay();
- if (display->includesLayer(layerFilter)) {
+ if (display->includesLayer(layer->getOutputFilter())) {
display->editState().dirtyRegion.orSelf(dirty);
}
}
@@ -3987,7 +3901,6 @@
{
std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
mCreatedLayers.emplace_back(layer, parent, args.addToRoot);
- mNewLayers.emplace_back(std::make_unique<frontend::RequestedLayerState>(args));
}
setTransactionFlags(eTransactionNeeded);
@@ -4355,11 +4268,9 @@
const std::vector<ListenerCallbacks>& listenerCallbacks,
int originPid, int originUid, uint64_t transactionId) {
uint32_t transactionFlags = 0;
- if (!mLayerLifecycleManagerEnabled) {
- for (DisplayState& display : displays) {
- display.sanitize(permissions);
- transactionFlags |= setDisplayStateLocked(display);
- }
+ for (DisplayState& display : displays) {
+ display.sanitize(permissions);
+ transactionFlags |= setDisplayStateLocked(display);
}
// start and end registration for listeners w/ no surface so they can get their callback. Note
@@ -4371,16 +4282,9 @@
uint32_t clientStateFlags = 0;
for (auto& resolvedState : states) {
- if (mLegacyFrontEndEnabled) {
- clientStateFlags |=
- setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
- isAutoTimestamp, postTime, permissions, transactionId);
-
- } else /*mLayerLifecycleManagerEnabled*/ {
- clientStateFlags |= updateLayerCallbacksAndStats(frameTimelineInfo, resolvedState,
- desiredPresentTime, isAutoTimestamp,
- postTime, permissions, transactionId);
- }
+ clientStateFlags |=
+ setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
+ isAutoTimestamp, postTime, permissions, transactionId);
if ((flags & eAnimation) && resolvedState.state.surface) {
if (const auto layer = LayerHandle::getLayer(resolvedState.state.surface)) {
using LayerUpdateType = scheduler::LayerHistory::LayerUpdateType;
@@ -4413,8 +4317,8 @@
bool needsTraversal = false;
if (transactionFlags) {
- // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
- // so we don't have to wake up again next frame to perform an unnecessary traversal.
+        // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
+        // so we don't have to wake up again next frame to perform an unnecessary traversal.
if (transactionFlags & eTraversalNeeded) {
transactionFlags = transactionFlags & (~eTraversalNeeded);
needsTraversal = true;
@@ -4427,42 +4331,6 @@
return needsTraversal;
}
-bool SurfaceFlinger::applyAndCommitDisplayTransactionStates(
- std::vector<TransactionState>& transactions) {
- Mutex::Autolock _l(mStateLock);
- bool needsTraversal = false;
- uint32_t transactionFlags = 0;
- for (auto& transaction : transactions) {
- for (DisplayState& display : transaction.displays) {
- display.sanitize(transaction.permissions);
- transactionFlags |= setDisplayStateLocked(display);
- }
- }
-
- if (transactionFlags) {
- // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
- // so we don't have to wake up again next frame to perform an unnecessary traversal.
- if (transactionFlags & eTraversalNeeded) {
- transactionFlags = transactionFlags & (~eTraversalNeeded);
- needsTraversal = true;
- }
- if (transactionFlags) {
- setTransactionFlags(transactionFlags);
- }
- }
-
- mFrontEndDisplayInfosChanged = mTransactionFlags & eDisplayTransactionNeeded;
- if (mFrontEndDisplayInfosChanged && !mLegacyFrontEndEnabled) {
- processDisplayChangesLocked();
- mFrontEndDisplayInfos.clear();
- for (const auto& [_, display] : mDisplays) {
- mFrontEndDisplayInfos.try_emplace(display->getLayerStack(), display->getFrontEndInfo());
- }
- }
-
- return needsTraversal;
-}
-
uint32_t SurfaceFlinger::setDisplayStateLocked(const DisplayState& s) {
const ssize_t index = mCurrentState.displays.indexOfKey(s.token);
if (index < 0) return 0;
@@ -4850,8 +4718,7 @@
// Do nothing. Processing the transaction completed listeners currently causes the flush.
}
- if (layer->setTransactionCompletedListeners(callbackHandles,
- layer->willPresentCurrentTransaction())) {
+ if (layer->setTransactionCompletedListeners(callbackHandles)) {
flags |= eTraversalNeeded;
}
@@ -4867,96 +4734,6 @@
return flags;
}
-uint32_t SurfaceFlinger::updateLayerCallbacksAndStats(const FrameTimelineInfo& frameTimelineInfo,
- ResolvedComposerState& composerState,
- int64_t desiredPresentTime,
- bool isAutoTimestamp, int64_t postTime,
- uint32_t permissions,
- uint64_t transactionId) {
- layer_state_t& s = composerState.state;
- s.sanitize(permissions);
- const nsecs_t latchTime = systemTime();
- bool unused;
-
- std::vector<ListenerCallbacks> filteredListeners;
- for (auto& listener : s.listeners) {
- // Starts a registration but separates the callback ids according to callback type. This
- // allows the callback invoker to send on latch callbacks earlier.
- // note that startRegistration will not re-register if the listener has
- // already be registered for a prior surface control
-
- ListenerCallbacks onCommitCallbacks = listener.filter(CallbackId::Type::ON_COMMIT);
- if (!onCommitCallbacks.callbackIds.empty()) {
- filteredListeners.push_back(onCommitCallbacks);
- }
-
- ListenerCallbacks onCompleteCallbacks = listener.filter(CallbackId::Type::ON_COMPLETE);
- if (!onCompleteCallbacks.callbackIds.empty()) {
- filteredListeners.push_back(onCompleteCallbacks);
- }
- }
-
- const uint64_t what = s.what;
- uint32_t flags = 0;
- sp<Layer> layer = nullptr;
- if (s.surface) {
- layer = LayerHandle::getLayer(s.surface);
- } else {
- // The client may provide us a null handle. Treat it as if the layer was removed.
- ALOGW("Attempt to set client state with a null layer handle");
- }
- if (layer == nullptr) {
- for (auto& [listener, callbackIds] : s.listeners) {
- mTransactionCallbackInvoker.addCallbackHandle(sp<CallbackHandle>::make(listener,
- callbackIds,
- s.surface),
- std::vector<JankData>());
- }
- return 0;
- }
- if (what & layer_state_t::eProducerDisconnect) {
- layer->onDisconnect();
- }
- std::optional<nsecs_t> dequeueBufferTimestamp;
- if (what & layer_state_t::eMetadataChanged) {
- dequeueBufferTimestamp = s.metadata.getInt64(gui::METADATA_DEQUEUE_TIME);
- }
-
- std::vector<sp<CallbackHandle>> callbackHandles;
- if ((what & layer_state_t::eHasListenerCallbacksChanged) && (!filteredListeners.empty())) {
- for (auto& [listener, callbackIds] : filteredListeners) {
- callbackHandles.emplace_back(
- sp<CallbackHandle>::make(listener, callbackIds, s.surface));
- }
- }
- if (what & layer_state_t::eSidebandStreamChanged) {
- if (layer->setSidebandStream(s.sidebandStream)) flags |= eTraversalNeeded;
- }
- if (what & layer_state_t::eBufferChanged) {
- if (layer->setBuffer(composerState.externalTexture, *s.bufferData, postTime,
- desiredPresentTime, isAutoTimestamp, dequeueBufferTimestamp,
- frameTimelineInfo)) {
- layer->latchBuffer(unused, latchTime);
- flags |= eTraversalNeeded;
- }
- mLayersWithQueuedFrames.emplace(layer);
- } else if (frameTimelineInfo.vsyncId != FrameTimelineInfo::INVALID_VSYNC_ID) {
- layer->setFrameTimelineVsyncForBufferlessTransaction(frameTimelineInfo, postTime);
- }
-
- if (what & layer_state_t::eTrustedPresentationInfoChanged) {
- layer->setTrustedPresentationInfo(s.trustedPresentationThresholds,
- s.trustedPresentationListener);
- }
-
- const auto& snapshot = mLayerSnapshotBuilder.getSnapshot(layer->getSequence());
- bool willPresentCurrentTransaction =
- snapshot && (snapshot->hasReadyFrame || snapshot->sidebandStreamHasFrame);
- if (layer->setTransactionCompletedListeners(callbackHandles, willPresentCurrentTransaction))
- flags |= eTraversalNeeded;
- return flags;
-}
-
uint32_t SurfaceFlinger::addInputWindowCommands(const InputWindowCommands& inputWindowCommands) {
bool hasChanges = mInputWindowCommands.merge(inputWindowCommands);
return hasChanges ? eTraversalNeeded : 0;
@@ -5025,7 +4802,6 @@
LayerCreationArgs mirrorArgs(args);
mirrorArgs.flags |= ISurfaceComposerClient::eNoColorFill;
mirrorArgs.addToRoot = true;
- mirrorArgs.layerStackToMirror = layerStack;
result = createEffectLayer(mirrorArgs, &outResult.handle, &rootMirrorLayer);
outResult.layerId = rootMirrorLayer->sequence;
outResult.layerName = String16(rootMirrorLayer->getDebugName());
@@ -5128,12 +4904,7 @@
setTransactionFlags(eTransactionNeeded);
}
-void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t layerId) {
- {
- std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
- mDestroyedHandles.emplace_back(layerId);
- }
-
+void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t /* layerId */) {
Mutex::Autolock lock(mStateLock);
markLayerPendingRemovalLocked(layer);
mBufferCountTracker.remove(handle);
@@ -5141,8 +4912,6 @@
if (mTransactionTracing) {
mTransactionTracing->onHandleRemoved(handle);
}
-
- setTransactionFlags(eTransactionFlushNeeded);
}
void SurfaceFlinger::onInitializeDisplays() {
@@ -6712,15 +6481,10 @@
args.useIdentityTransform, args.captureSecureLayers);
});
- GetLayerSnapshotsFunction getLayerSnapshots;
- if (mLayerLifecycleManagerEnabled) {
- getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, args.uid);
- } else {
- auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
- traverseLayersInLayerStack(layerStack, args.uid, visitor);
- };
- getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
- }
+ auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
+ traverseLayersInLayerStack(layerStack, args.uid, visitor);
+ };
+ auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize,
args.pixelFormat, args.allowProtected, args.grayscale,
@@ -6754,15 +6518,10 @@
false /* captureSecureLayers */);
});
- GetLayerSnapshotsFunction getLayerSnapshots;
- if (mLayerLifecycleManagerEnabled) {
- getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, CaptureArgs::UNSET_UID);
- } else {
- auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
- traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
- };
- getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
- }
+ auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
+ traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
+ };
+ auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
if (captureListener == nullptr) {
ALOGE("capture screen must provide a capture listener callback");
@@ -6857,37 +6616,29 @@
return std::make_unique<LayerRenderArea>(*this, parent, crop, reqSize, dataspace,
childrenOnly, args.captureSecureLayers);
});
- GetLayerSnapshotsFunction getLayerSnapshots;
- if (mLayerLifecycleManagerEnabled) {
- FloatRect parentCrop = crop.isEmpty() ? FloatRect(0, 0, reqSize.width, reqSize.height)
- : crop.toFloatRect();
- getLayerSnapshots = getLayerSnapshotsForScreenshots(parent->sequence, args.uid,
- std::move(excludeLayerIds),
- args.childrenOnly, parentCrop);
- } else {
- auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
- parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
- if (!layer->isVisible()) {
- return;
- } else if (args.childrenOnly && layer == parent.get()) {
- return;
- } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
+
+ auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
+ parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
+ if (!layer->isVisible()) {
+ return;
+ } else if (args.childrenOnly && layer == parent.get()) {
+ return;
+ } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
+ return;
+ }
+
+ auto p = sp<Layer>::fromExisting(layer);
+ while (p != nullptr) {
+ if (excludeLayerIds.count(p->sequence) != 0) {
return;
}
+ p = p->getParent();
+ }
- auto p = sp<Layer>::fromExisting(layer);
- while (p != nullptr) {
- if (excludeLayerIds.count(p->sequence) != 0) {
- return;
- }
- p = p->getParent();
- }
-
- visitor(layer);
- });
- };
- getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
- }
+ visitor(layer);
+ });
+ };
+ auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
if (captureListener == nullptr) {
ALOGE("capture screen must provide a capture listener callback");
@@ -7669,18 +7420,24 @@
return true;
}
-bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId,
- std::vector<LayerCreatedState>& createdLayers) {
- if (createdLayers.size() == 0) {
- return false;
+bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId) {
+ std::vector<LayerCreatedState> createdLayers;
+ {
+ std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
+ createdLayers = std::move(mCreatedLayers);
+ mCreatedLayers.clear();
+ if (createdLayers.size() == 0) {
+ return false;
+ }
}
Mutex::Autolock _l(mStateLock);
for (const auto& createdLayer : createdLayers) {
handleLayerCreatedLocked(createdLayer, vsyncId);
}
+ createdLayers.clear();
mLayersAdded = true;
- return mLayersAdded;
+ return true;
}
void SurfaceFlinger::updateLayerMetadataSnapshot() {
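
The reworked commitCreatedLayers above holds mCreatedLayersLock only long enough to move the pending entries into a local vector, then takes mStateLock and handles the layers without holding both locks at once. Below is a minimal sketch of this drain-under-lock pattern, assuming generic names (PendingQueue, Item, process) rather than the actual SurfaceFlinger types.

    // Sketch of the drain-under-lock pattern used by commitCreatedLayers above.
    // PendingQueue, Item and process() are illustrative, not SurfaceFlinger APIs.
    #include <mutex>
    #include <vector>

    struct Item {
        int id = 0;
    };

    class PendingQueue {
    public:
        void push(Item item) {
            std::scoped_lock lock(mMutex);
            mPending.push_back(item);
        }

        // Returns true if any pending items were handled this call.
        bool drainAndProcess() {
            std::vector<Item> local;
            {
                // Hold the queue lock only long enough to steal the backlog.
                std::scoped_lock lock(mMutex);
                local = std::move(mPending);
                mPending.clear();
                if (local.empty()) {
                    return false;
                }
            }
            // The heavier per-item work runs outside the queue lock, so producers
            // calling push() are never blocked on it.
            for (const Item& item : local) {
                process(item);
            }
            return true;
        }

    private:
        void process(const Item&) {}  // Placeholder for per-item handling.

        std::mutex mMutex;
        std::vector<Item> mPending;
    };
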
@@ -7708,150 +7465,6 @@
});
}
-void SurfaceFlinger::moveSnapshotsFromCompositionArgs(
- compositionengine::CompositionRefreshArgs& refreshArgs,
- std::vector<std::pair<Layer*, LayerFE*>>& layers) {
- if (mLayerLifecycleManagerEnabled) {
- std::vector<std::unique_ptr<frontend::LayerSnapshot>>& snapshots =
- mLayerSnapshotBuilder.getSnapshots();
- for (auto [_, layerFE] : layers) {
- auto i = layerFE->mSnapshot->globalZ;
- snapshots[i] = std::move(layerFE->mSnapshot);
- }
- }
- if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
- for (auto [layer, layerFE] : layers) {
- layer->updateLayerSnapshot(std::move(layerFE->mSnapshot));
- }
- }
-}
-
-std::vector<std::pair<Layer*, LayerFE*>> SurfaceFlinger::moveSnapshotsToCompositionArgs(
- compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly, int64_t vsyncId) {
- std::vector<std::pair<Layer*, LayerFE*>> layers;
- if (mLayerLifecycleManagerEnabled) {
- mLayerSnapshotBuilder.forEachVisibleSnapshot(
- [&](std::unique_ptr<frontend::LayerSnapshot>& snapshot) {
- if (cursorOnly &&
- snapshot->compositionType !=
- aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
- return;
- }
-
- if (!snapshot->hasSomethingToDraw()) {
- return;
- }
-
- auto it = mLegacyLayers.find(snapshot->sequence);
- LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(),
- "Couldnt find layer object for %s",
- snapshot->getDebugString().c_str());
- auto& legacyLayer = it->second;
- sp<LayerFE> layerFE = legacyLayer->getCompositionEngineLayerFE(snapshot->path);
- layerFE->mSnapshot = std::move(snapshot);
- refreshArgs.layers.push_back(layerFE);
- layers.emplace_back(legacyLayer.get(), layerFE.get());
- });
- }
- if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
- mDrawingState.traverseInZOrder([&refreshArgs, cursorOnly, &layers](Layer* layer) {
- if (auto layerFE = layer->getCompositionEngineLayerFE()) {
- if (cursorOnly &&
- layer->getLayerSnapshot()->compositionType !=
- aidl::android::hardware::graphics::composer3::Composition::CURSOR)
- return;
- layer->updateSnapshot(/* refreshArgs.updatingGeometryThisFrame */ true);
- layerFE->mSnapshot = layer->stealLayerSnapshot();
- refreshArgs.layers.push_back(layerFE);
- layers.emplace_back(layer, layerFE.get());
- }
- });
- }
-
- return layers;
-}
-
-std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
-SurfaceFlinger::getLayerSnapshotsForScreenshots(std::optional<ui::LayerStack> layerStack,
- uint32_t uid) {
- return [this, layerStack, uid]() {
- std::vector<std::pair<Layer*, sp<LayerFE>>> layers;
- for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
- if (layerStack && snapshot->outputFilter.layerStack != *layerStack) {
- continue;
- }
- if (uid != CaptureArgs::UNSET_UID && snapshot->inputInfo.ownerUid != uid) {
- continue;
- }
- if (!snapshot->isVisible || !snapshot->hasSomethingToDraw()) {
- continue;
- }
-
- auto it = mLegacyLayers.find(snapshot->sequence);
- LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldnt find layer object for %s",
- snapshot->getDebugString().c_str());
- auto& legacyLayer = it->second;
- sp<LayerFE> layerFE = getFactory().createLayerFE(legacyLayer->getName());
- layerFE->mSnapshot = std::make_unique<frontend::LayerSnapshot>(*snapshot);
- layers.emplace_back(legacyLayer.get(), std::move(layerFE));
- }
-
- return layers;
- };
-}
-
-std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
-SurfaceFlinger::getLayerSnapshotsForScreenshots(uint32_t rootLayerId, uint32_t uid,
- std::unordered_set<uint32_t> excludeLayerIds,
- bool childrenOnly, const FloatRect& parentCrop) {
- return [this, excludeLayerIds = std::move(excludeLayerIds), uid, rootLayerId, childrenOnly,
- parentCrop]() {
- frontend::LayerSnapshotBuilder::Args
- args{.root = mLayerHierarchyBuilder.getPartialHierarchy(rootLayerId, childrenOnly),
- .layerLifecycleManager = mLayerLifecycleManager,
- .displays = mFrontEndDisplayInfos,
- .displayChanges = true,
- .globalShadowSettings = mDrawingState.globalShadowSettings,
- .supportsBlur = mSupportsBlur,
- .forceFullDamage = mForceFullDamage,
- .parentCrop = {parentCrop},
- .excludeLayerIds = std::move(excludeLayerIds)};
- mLayerSnapshotBuilder.update(args);
-
- auto getLayerSnapshotsFn = getLayerSnapshotsForScreenshots({}, uid);
- std::vector<std::pair<Layer*, sp<LayerFE>>> layers = getLayerSnapshotsFn();
- args.root = mLayerHierarchyBuilder.getHierarchy();
- args.parentCrop.reset();
- args.excludeLayerIds.clear();
- mLayerSnapshotBuilder.update(args);
- return layers;
- };
-}
-
-SurfaceFlinger::LifecycleUpdate SurfaceFlinger::flushLifecycleUpdates() {
- LifecycleUpdate update;
- ATRACE_NAME("TransactionHandler:flushTransactions");
- // Locking:
- // 1. to prevent onHandleDestroyed from being called while the state lock is held,
- // we must keep a copy of the transactions (specifically the composer
- // states) around outside the scope of the lock.
- // 2. Transactions and created layers do not share a lock. To prevent applying
- // transactions with layers still in the createdLayer queue, flush the transactions
- // before committing the created layers.
- update.transactions = mTransactionHandler.flushTransactions();
- {
- // TODO(b/238781169) lockless queue this and keep order.
- std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
- update.layerCreatedStates = std::move(mCreatedLayers);
- mCreatedLayers.clear();
- update.newLayers = std::move(mNewLayers);
- mNewLayers.clear();
- update.destroyedHandles = std::move(mDestroyedHandles);
- mDestroyedHandles.clear();
- }
- return update;
-}
-
// gui::ISurfaceComposer
binder::Status SurfaceComposerAIDL::bootFinished() {
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 5b8038b..97469f4 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -71,9 +71,7 @@
#include "FlagManager.h"
#include "FrontEnd/DisplayInfo.h"
#include "FrontEnd/LayerCreationArgs.h"
-#include "FrontEnd/LayerLifecycleManager.h"
#include "FrontEnd/LayerSnapshot.h"
-#include "FrontEnd/LayerSnapshotBuilder.h"
#include "FrontEnd/TransactionHandler.h"
#include "LayerVector.h"
#include "Scheduler/RefreshRateSelector.h"
@@ -451,26 +449,6 @@
FINISHED,
};
- struct LayerCreatedState {
- LayerCreatedState(const wp<Layer>& layer, const wp<Layer>& parent, bool addToRoot)
- : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
- wp<Layer> layer;
- // Indicates the initial parent of the created layer, only used for creating layer in
- // SurfaceFlinger. If nullptr, it may add the created layer into the current root layers.
- wp<Layer> initialParent;
- // Indicates whether the layer getting created should be added at root if there's no parent
- // and has permission ACCESS_SURFACE_FLINGER. If set to false and no parent, the layer will
- // be added offscreen.
- bool addToRoot;
- };
-
- struct LifecycleUpdate {
- std::vector<TransactionState> transactions;
- std::vector<LayerCreatedState> layerCreatedStates;
- std::vector<std::unique_ptr<frontend::RequestedLayerState>> newLayers;
- std::vector<uint32_t> destroyedHandles;
- };
-
template <typename F, std::enable_if_t<!std::is_member_function_pointer_v<F>>* = nullptr>
static Dumper dumper(F&& dump) {
using namespace std::placeholders;
@@ -710,17 +688,6 @@
void updateLayerGeometry();
void updateLayerMetadataSnapshot();
- std::vector<std::pair<Layer*, LayerFE*>> moveSnapshotsToCompositionArgs(
- compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly,
- int64_t vsyncId);
- void moveSnapshotsFromCompositionArgs(compositionengine::CompositionRefreshArgs& refreshArgs,
- std::vector<std::pair<Layer*, LayerFE*>>& layers);
- bool updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
- bool transactionsFlushed, bool& out)
- REQUIRES(kMainThreadContext);
- bool updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update, bool transactionsFlushed,
- bool& out) REQUIRES(kMainThreadContext);
- LifecycleUpdate flushLifecycleUpdates() REQUIRES(kMainThreadContext);
void updateInputFlinger();
void persistDisplayBrightness(bool needsComposite) REQUIRES(kMainThreadContext);
@@ -748,8 +715,6 @@
bool flushTransactionQueues(VsyncId) REQUIRES(kMainThreadContext);
bool applyTransactions(std::vector<TransactionState>&, VsyncId) REQUIRES(kMainThreadContext);
- bool applyAndCommitDisplayTransactionStates(std::vector<TransactionState>& transactions)
- REQUIRES(kMainThreadContext);
// Returns true if there is at least one transaction that needs to be flushed
bool transactionFlushNeeded();
@@ -765,10 +730,7 @@
int64_t desiredPresentTime, bool isAutoTimestamp,
int64_t postTime, uint32_t permissions, uint64_t transactionId)
REQUIRES(mStateLock);
- uint32_t updateLayerCallbacksAndStats(const FrameTimelineInfo&, ResolvedComposerState&,
- int64_t desiredPresentTime, bool isAutoTimestamp,
- int64_t postTime, uint32_t permissions,
- uint64_t transactionId) REQUIRES(mStateLock);
+
uint32_t getTransactionFlags() const;
// Sets the masked bits, and schedules a commit if needed.
@@ -926,7 +888,7 @@
// mark a region of a layer stack dirty. this updates the dirty
// region of all screens presenting this layer stack.
- void invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty);
+ void invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty);
ui::LayerFilter makeLayerFilterForDisplay(DisplayId displayId, ui::LayerStack layerStack)
REQUIRES(mStateLock) {
@@ -1184,7 +1146,6 @@
// Set if LayerMetadata has changed since the last LayerMetadata snapshot.
bool mLayerMetadataSnapshotNeeded = false;
- // TODO(b/238781169) validate these on composition
// Tracks layers that have pending frames which are candidates for being
// latched.
std::unordered_set<sp<Layer>, SpHash<Layer>> mLayersWithQueuedFrames;
@@ -1359,11 +1320,23 @@
GUARDED_BY(mStateLock);
mutable std::mutex mCreatedLayersLock;
+ struct LayerCreatedState {
+ LayerCreatedState(const wp<Layer>& layer, const wp<Layer> parent, bool addToRoot)
+ : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
+ wp<Layer> layer;
+ // Indicates the initial parent of the created layer, only used for creating layer in
+ // SurfaceFlinger. If nullptr, it may add the created layer into the current root layers.
+ wp<Layer> initialParent;
+ // Indicates whether the layer getting created should be added at root if there's no parent
+ // and has permission ACCESS_SURFACE_FLINGER. If set to false and no parent, the layer will
+ // be added offscreen.
+ bool addToRoot;
+ };
// A temporary pool that stores the created layers and will be added to current state in main
// thread.
std::vector<LayerCreatedState> mCreatedLayers GUARDED_BY(mCreatedLayersLock);
- bool commitCreatedLayers(VsyncId, std::vector<LayerCreatedState>& createdLayers);
+ bool commitCreatedLayers(VsyncId);
void handleLayerCreatedLocked(const LayerCreatedState&, VsyncId) REQUIRES(mStateLock);
mutable std::mutex mMirrorDisplayLock;
@@ -1385,11 +1358,6 @@
return hasDisplay(
[](const auto& display) { return display.isRefreshRateOverlayEnabled(); });
}
- std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
- std::optional<ui::LayerStack> layerStack, uint32_t uid);
- std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
- uint32_t rootLayerId, uint32_t uid, std::unordered_set<uint32_t> excludeLayerIds,
- bool childrenOnly, const FloatRect& parentCrop);
const sp<WindowInfosListenerInvoker> mWindowInfosListenerInvoker;
@@ -1402,18 +1370,6 @@
bool mPowerHintSessionEnabled;
- bool mLayerLifecycleManagerEnabled = false;
- bool mLegacyFrontEndEnabled = true;
-
- frontend::LayerLifecycleManager mLayerLifecycleManager;
- frontend::LayerHierarchyBuilder mLayerHierarchyBuilder{{}};
- frontend::LayerSnapshotBuilder mLayerSnapshotBuilder;
-
- std::vector<uint32_t> mDestroyedHandles;
- std::vector<std::unique_ptr<frontend::RequestedLayerState>> mNewLayers;
- // These classes do not store any client state but help with managing transaction callbacks
- // and stats.
- std::unordered_map<uint32_t, sp<Layer>> mLegacyLayers;
struct {
bool late = false;
bool early = false;
@@ -1421,7 +1377,6 @@
TransactionHandler mTransactionHandler;
display::DisplayMap<ui::LayerStack, frontend::DisplayInfo> mFrontEndDisplayInfos;
- bool mFrontEndDisplayInfosChanged = false;
};
class SurfaceComposerAIDL : public gui::BnSurfaceComposer {
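
The header above leans on Clang's thread-safety annotations: mCreatedLayers is GUARDED_BY(mCreatedLayersLock), and several methods are marked REQUIRES(mStateLock) or REQUIRES(kMainThreadContext). Below is a minimal sketch of how such annotations are typically wired up when compiling with clang -Wthread-safety; CheckedMutex, PendingLayers and the macro spellings are illustrative, not the exact macros or types SurfaceFlinger pulls in.

    // Illustrative wiring of Clang thread-safety annotations (clang -Wthread-safety).
    // CheckedMutex and PendingLayers are example names, not SurfaceFlinger types.
    #include <mutex>
    #include <vector>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

    // A std::mutex wrapper the analysis can track as a capability.
    class CAPABILITY("mutex") CheckedMutex {
    public:
        void lock() ACQUIRE() { mImpl.lock(); }
        void unlock() RELEASE() { mImpl.unlock(); }

    private:
        std::mutex mImpl;
    };

    class PendingLayers {
    public:
        void add(int layerId) {
            mLock.lock();
            mIds.push_back(layerId);  // OK: the analysis sees mLock held here.
            mLock.unlock();
        }

        // Callers must already hold mLock; the compiler flags call sites that don't.
        bool emptyLocked() const REQUIRES(mLock) { return mIds.empty(); }

    private:
        mutable CheckedMutex mLock;
        std::vector<int> mIds GUARDED_BY(mLock);
    };
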
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
index c088e7b..11719c4 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
@@ -148,7 +148,7 @@
layer->fenceHasSignaled();
layer->onPreComposition(mFdp.ConsumeIntegral<int64_t>());
const std::vector<sp<CallbackHandle>> callbacks;
- layer->setTransactionCompletedListeners(callbacks, mFdp.ConsumeBool());
+ layer->setTransactionCompletedListeners(callbacks);
std::shared_ptr<renderengine::ExternalTexture> texture = std::make_shared<
renderengine::mock::FakeExternalTexture>(mFdp.ConsumeIntegral<uint32_t>(),