Merge changes from topics "styluspointericon-config", "styluspointericon-presentation"
* changes:
Add testcase for StylusPointer
check config for showing a stylus pointer (native part)
Separate default pointer for mouse and stylus (native part)
[scribe] show stylus hover icon
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 695faf8..91bcb8d 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -39,6 +39,11 @@
namespace android {
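+// Helper: returns true when the calling uid maps to an isolated app id
+// (i.e. its appid falls within [AID_ISOLATED_START, AID_ISOLATED_END]).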
+bool is_multiuser_uid_isolated(uid_t uid) {
+ uid_t appid = multiuser_get_app_id(uid);
+ return appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END;
+}
+
#ifndef VENDORSERVICEMANAGER
struct ManifestWithDescription {
@@ -273,13 +278,8 @@
if (auto it = mNameToService.find(name); it != mNameToService.end()) {
service = &(it->second);
- if (!service->allowIsolated) {
- uid_t appid = multiuser_get_app_id(ctx.uid);
- bool isIsolated = appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END;
-
- if (isIsolated) {
- return nullptr;
- }
+ if (!service->allowIsolated && is_multiuser_uid_isolated(ctx.uid)) {
+ return nullptr;
}
out = service->binder;
}
@@ -425,7 +425,17 @@
auto ctx = mAccess->getCallingContext();
if (!mAccess->canFind(ctx, name)) {
- return Status::fromExceptionCode(Status::EX_SECURITY);
+ return Status::fromExceptionCode(Status::EX_SECURITY, "SELinux");
+ }
+
+ // note - we could allow isolated apps to get notifications if we
+ // kept track of isolated callbacks separately from non-isolated
+ // callbacks, but we don't, since isolated apps shouldn't access lazy
+ // services anyway; using separate APIs keeps things simple.
+ // Here, we disallow everything, because the service might not be
+ // registered yet.
+ if (is_multiuser_uid_isolated(ctx.uid)) {
+ return Status::fromExceptionCode(Status::EX_SECURITY, "isolated app");
}
if (!isValidServiceName(name)) {
diff --git a/cmds/servicemanager/test_sm.cpp b/cmds/servicemanager/test_sm.cpp
index 0fd8d8e..cae32e3 100644
--- a/cmds/servicemanager/test_sm.cpp
+++ b/cmds/servicemanager/test_sm.cpp
@@ -383,6 +383,22 @@
sp<CallbackHistorian> cb = sp<CallbackHistorian>::make();
+ EXPECT_EQ(sm->registerForNotifications("foofoo", cb).exceptionCode(), Status::EX_SECURITY);
+}
+
+TEST(GetService, IsolatedCantRegister) {
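+ // An isolated app uid must not be able to register for service notifications.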
+ std::unique_ptr<MockAccess> access = std::make_unique<NiceMock<MockAccess>>();
+
+ EXPECT_CALL(*access, getCallingContext())
+ .WillOnce(Return(Access::CallingContext{
+ .uid = AID_ISOLATED_START,
+ }));
+ EXPECT_CALL(*access, canFind(_, _)).WillOnce(Return(true));
+
+ sp<ServiceManager> sm = sp<ServiceManager>::make(std::move(access));
+
+ sp<CallbackHistorian> cb = sp<CallbackHistorian>::make();
+
EXPECT_EQ(sm->registerForNotifications("foofoo", cb).exceptionCode(),
Status::EX_SECURITY);
}
diff --git a/libs/gui/Choreographer.cpp b/libs/gui/Choreographer.cpp
index 6b25b26..99bf6ba 100644
--- a/libs/gui/Choreographer.cpp
+++ b/libs/gui/Choreographer.cpp
@@ -101,8 +101,9 @@
return gChoreographer;
}
-Choreographer::Choreographer(const sp<Looper>& looper)
- : DisplayEventDispatcher(looper, gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp),
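+// The optional layerHandle associates this Choreographer's display event
+// connection with a specific SurfaceFlinger layer (see ISurfaceComposer.aidl).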
+Choreographer::Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle)
+ : DisplayEventDispatcher(looper, gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp, {},
+ layerHandle),
mLooper(looper),
mThreadId(std::this_thread::get_id()) {
std::lock_guard<std::mutex> _l(gChoreographers.lock);
diff --git a/libs/gui/DisplayEventDispatcher.cpp b/libs/gui/DisplayEventDispatcher.cpp
index 501e69a..8a88377 100644
--- a/libs/gui/DisplayEventDispatcher.cpp
+++ b/libs/gui/DisplayEventDispatcher.cpp
@@ -37,9 +37,10 @@
DisplayEventDispatcher::DisplayEventDispatcher(const sp<Looper>& looper,
gui::ISurfaceComposer::VsyncSource vsyncSource,
- EventRegistrationFlags eventRegistration)
+ EventRegistrationFlags eventRegistration,
+ const sp<IBinder>& layerHandle)
: mLooper(looper),
- mReceiver(vsyncSource, eventRegistration),
+ mReceiver(vsyncSource, eventRegistration, layerHandle),
mWaitingForVsync(false),
mLastVsyncCount(0),
mLastScheduleVsyncTime(0) {
diff --git a/libs/gui/DisplayEventReceiver.cpp b/libs/gui/DisplayEventReceiver.cpp
index c52fb6b..6849a95 100644
--- a/libs/gui/DisplayEventReceiver.cpp
+++ b/libs/gui/DisplayEventReceiver.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#define LOG_TAG "DisplayEventReceiver"
+
#include <string.h>
#include <utils/Errors.h>
@@ -32,7 +34,8 @@
// ---------------------------------------------------------------------------
DisplayEventReceiver::DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource,
- EventRegistrationFlags eventRegistration) {
+ EventRegistrationFlags eventRegistration,
+ const sp<IBinder>& layerHandle) {
sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
if (sf != nullptr) {
mEventConnection = nullptr;
@@ -41,8 +44,8 @@
static_cast<
gui::ISurfaceComposer::EventRegistration>(
eventRegistration.get()),
- &mEventConnection);
- if (mEventConnection != nullptr) {
+ layerHandle, &mEventConnection);
+ if (status.isOk() && mEventConnection != nullptr) {
mDataChannel = std::make_unique<gui::BitTube>();
status = mEventConnection->stealReceiveChannel(mDataChannel.get());
if (!status.isOk()) {
@@ -51,6 +54,8 @@
mDataChannel.reset();
mEventConnection.clear();
}
+ } else {
+ ALOGE("DisplayEventConnection creation failed: status=%s", status.toString8().c_str());
}
}
}
diff --git a/libs/gui/SurfaceControl.cpp b/libs/gui/SurfaceControl.cpp
index 7aee882..c5f9c38 100644
--- a/libs/gui/SurfaceControl.cpp
+++ b/libs/gui/SurfaceControl.cpp
@@ -26,6 +26,7 @@
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/Log.h>
+#include <utils/Looper.h>
#include <utils/threads.h>
#include <binder/IPCThreadState.h>
@@ -34,8 +35,9 @@
#include <ui/Rect.h>
#include <ui/StaticDisplayInfo.h>
-#include <gui/BufferQueueCore.h>
#include <gui/BLASTBufferQueue.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/Choreographer.h>
#include <gui/ISurfaceComposer.h>
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
@@ -191,6 +193,24 @@
return mName;
}
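+// Lazily creates and caches a Choreographer bound to this SurfaceControl's layer
+// handle; requires the calling thread to have a prepared Looper.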
+std::shared_ptr<Choreographer> SurfaceControl::getChoreographer() {
+ if (mChoreographer) {
+ return mChoreographer;
+ }
+ sp<Looper> looper = Looper::getForThread();
+ if (!looper.get()) {
+ ALOGE("%s: No looper prepared for thread", __func__);
+ return nullptr;
+ }
+ mChoreographer = std::make_shared<Choreographer>(looper, getHandle());
+ status_t result = mChoreographer->initialize();
+ if (result != OK) {
+ ALOGE("Failed to initialize choreographer");
+ mChoreographer = nullptr;
+ }
+ return mChoreographer;
+}
+
sp<IGraphicBufferProducer> SurfaceControl::getIGraphicBufferProducer()
{
getSurface();
diff --git a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
index 597749a..9812142 100644
--- a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
+++ b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
@@ -68,9 +68,15 @@
/**
* Create a display event connection
+ *
+ * layerHandle
+ * Optional binder handle representing a Layer in SF to associate the new
+ * DisplayEventConnection with. This handle can be found inside a surface control after
+ * surface creation, see ISurfaceComposerClient::createSurface. Set to null if no layer
+ * association should be made.
*/
@nullable IDisplayEventConnection createDisplayEventConnection(VsyncSource vsyncSource,
- EventRegistration eventRegistration);
+ EventRegistration eventRegistration, @nullable IBinder layerHandle);
/**
* Create a connection with SurfaceFlinger.
diff --git a/libs/gui/fuzzer/libgui_fuzzer_utils.h b/libs/gui/fuzzer/libgui_fuzzer_utils.h
index 14a0e39..f01c2a9 100644
--- a/libs/gui/fuzzer/libgui_fuzzer_utils.h
+++ b/libs/gui/fuzzer/libgui_fuzzer_utils.h
@@ -64,7 +64,7 @@
MOCK_METHOD(binder::Status, bootFinished, (), (override));
MOCK_METHOD(binder::Status, createDisplayEventConnection,
(gui::ISurfaceComposer::VsyncSource, gui::ISurfaceComposer::EventRegistration,
- sp<gui::IDisplayEventConnection>*),
+ const sp<IBinder>& /*layerHandle*/, sp<gui::IDisplayEventConnection>*),
(override));
MOCK_METHOD(binder::Status, createConnection, (sp<gui::ISurfaceComposerClient>*), (override));
MOCK_METHOD(binder::Status, createDisplay, (const std::string&, bool, float, sp<IBinder>*),
diff --git a/libs/gui/include/gui/Choreographer.h b/libs/gui/include/gui/Choreographer.h
index 89a7058..1df9b11 100644
--- a/libs/gui/include/gui/Choreographer.h
+++ b/libs/gui/include/gui/Choreographer.h
@@ -73,7 +73,8 @@
};
static Context gChoreographers;
- explicit Choreographer(const sp<Looper>& looper) EXCLUDES(gChoreographers.lock);
+ explicit Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle = nullptr)
+ EXCLUDES(gChoreographers.lock);
void postFrameCallbackDelayed(AChoreographer_frameCallback cb,
AChoreographer_frameCallback64 cb64,
AChoreographer_vsyncCallback vsyncCallback, void* data,
diff --git a/libs/gui/include/gui/DisplayEventDispatcher.h b/libs/gui/include/gui/DisplayEventDispatcher.h
index bf3a07b..140efa6 100644
--- a/libs/gui/include/gui/DisplayEventDispatcher.h
+++ b/libs/gui/include/gui/DisplayEventDispatcher.h
@@ -26,7 +26,8 @@
explicit DisplayEventDispatcher(const sp<Looper>& looper,
gui::ISurfaceComposer::VsyncSource vsyncSource =
gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
- EventRegistrationFlags eventRegistration = {});
+ EventRegistrationFlags eventRegistration = {},
+ const sp<IBinder>& layerHandle = nullptr);
status_t initialize();
void dispose();
diff --git a/libs/gui/include/gui/DisplayEventReceiver.h b/libs/gui/include/gui/DisplayEventReceiver.h
index 0f4907f..7fd6c35 100644
--- a/libs/gui/include/gui/DisplayEventReceiver.h
+++ b/libs/gui/include/gui/DisplayEventReceiver.h
@@ -119,7 +119,8 @@
*/
explicit DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource =
gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
- EventRegistrationFlags eventRegistration = {});
+ EventRegistrationFlags eventRegistration = {},
+ const sp<IBinder>& layerHandle = nullptr);
/*
* ~DisplayEventReceiver severs the connection with SurfaceFlinger, new events
diff --git a/libs/gui/include/gui/SurfaceControl.h b/libs/gui/include/gui/SurfaceControl.h
index 1d4fc7f..344b957 100644
--- a/libs/gui/include/gui/SurfaceControl.h
+++ b/libs/gui/include/gui/SurfaceControl.h
@@ -36,6 +36,7 @@
// ---------------------------------------------------------------------------
+class Choreographer;
class IGraphicBufferProducer;
class Surface;
class SurfaceComposerClient;
@@ -80,6 +81,9 @@
int32_t getLayerId() const;
const std::string& getName() const;
+ // TODO(b/267195698): Consider renaming.
+ std::shared_ptr<Choreographer> getChoreographer();
+
sp<IGraphicBufferProducer> getIGraphicBufferProducer();
status_t clearLayerFrameStats() const;
@@ -130,6 +134,7 @@
PixelFormat mFormat = PIXEL_FORMAT_NONE;
uint32_t mCreateFlags = 0;
uint64_t mFallbackFrameNumber = 100;
+ std::shared_ptr<Choreographer> mChoreographer;
};
}; // namespace android
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index 9b2bf7f..babc197 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -725,6 +725,7 @@
binder::Status createDisplayEventConnection(
VsyncSource /*vsyncSource*/, EventRegistration /*eventRegistration*/,
+ const sp<IBinder>& /*layerHandle*/,
sp<gui::IDisplayEventConnection>* outConnection) override {
*outConnection = nullptr;
return binder::Status::ok();
diff --git a/opengl/libs/Android.bp b/opengl/libs/Android.bp
index 750338b..49e1cba 100644
--- a/opengl/libs/Android.bp
+++ b/opengl/libs/Android.bp
@@ -144,6 +144,7 @@
srcs: [
"EGL/BlobCache.cpp",
"EGL/FileBlobCache.cpp",
+ "EGL/MultifileBlobCache.cpp",
],
export_include_dirs: ["EGL"],
}
@@ -160,7 +161,6 @@
srcs: [
"EGL/egl_tls.cpp",
"EGL/egl_cache.cpp",
- "EGL/egl_cache_multifile.cpp",
"EGL/egl_display.cpp",
"EGL/egl_object.cpp",
"EGL/egl_layers.cpp",
@@ -205,6 +205,11 @@
srcs: [
"EGL/BlobCache.cpp",
"EGL/BlobCache_test.cpp",
+ "EGL/MultifileBlobCache.cpp",
+ "EGL/MultifileBlobCache_test.cpp",
+ ],
+ shared_libs: [
+ "libutils",
],
}
diff --git a/opengl/libs/EGL/MultifileBlobCache.cpp b/opengl/libs/EGL/MultifileBlobCache.cpp
new file mode 100644
index 0000000..48b184b
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.cpp
@@ -0,0 +1,668 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/properties.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <utime.h>
+
+#include <algorithm>
+#include <chrono>
+#include <limits>
+#include <locale>
+
+#include <utils/JenkinsHash.h>
+
+using namespace std::literals;
+
+namespace {
+
+// Open the file and determine the size of the value it contains
+size_t getValueSizeFromFile(int fd, const std::string& entryPath) {
+ // Read the beginning of the file to get header
+ android::MultifileHeader header;
+ size_t result = read(fd, static_cast<void*>(&header), sizeof(android::MultifileHeader));
+ if (result != sizeof(android::MultifileHeader)) {
+ ALOGE("Error reading MultifileHeader from cache entry (%s): %s", entryPath.c_str(),
+ std::strerror(errno));
+ return 0;
+ }
+
+ return header.valueSize;
+}
+
+// Helper function to close entries or free them
+void freeHotCacheEntry(android::MultifileHotCache& entry) {
+ if (entry.entryFd != -1) {
+ // If we have an fd, then this entry was added to hot cache via INIT or GET
+ // We need to unmap and close the entry
+ munmap(entry.entryBuffer, entry.entrySize);
+ close(entry.entryFd);
+ } else {
+ // Otherwise, this was added to hot cache during SET, so it was never mapped;
+ // the fd is only opened later on the deferred worker thread.
+ delete[] entry.entryBuffer;
+ }
+}
+
+} // namespace
+
+namespace android {
+
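+// Constructor scans any existing cache directory: it tracks entry metadata, preloads
+// entries into the hot cache until maxHotCacheSize is reached, and starts the worker
+// thread used for deferred writes to disk.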
+MultifileBlobCache::MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize,
+ const std::string& baseDir)
+ : mInitialized(false),
+ mMaxTotalSize(maxTotalSize),
+ mTotalCacheSize(0),
+ mHotCacheLimit(maxHotCacheSize),
+ mHotCacheSize(0),
+ mWorkerThreadIdle(true) {
+ if (baseDir.empty()) {
+ return;
+ }
+
+ // Establish the name of our multifile directory
+ mMultifileDirName = baseDir + ".multifile";
+
+ // Set a limit for max key and value, ensuring at least one entry can always fit in hot cache
+ mMaxKeySize = mHotCacheLimit / 4;
+ mMaxValueSize = mHotCacheLimit / 2;
+
+ // Initialize our cache with the contents of the directory
+ mTotalCacheSize = 0;
+
+ // See if the dir exists, and initialize using its contents
+ struct stat st;
+ if (stat(mMultifileDirName.c_str(), &st) == 0) {
+ // Read all the files and gather details, then preload their contents
+ DIR* dir;
+ struct dirent* entry;
+ if ((dir = opendir(mMultifileDirName.c_str())) != nullptr) {
+ while ((entry = readdir(dir)) != nullptr) {
+ if (entry->d_name == "."s || entry->d_name == ".."s) {
+ continue;
+ }
+
+ std::string entryName = entry->d_name;
+ std::string fullPath = mMultifileDirName + "/" + entryName;
+
+ // The filename is the same as the entryHash
+ uint32_t entryHash = static_cast<uint32_t>(strtoul(entry->d_name, nullptr, 10));
+
+ // Look up the details of the file
+ struct stat st;
+ if (stat(fullPath.c_str(), &st) != 0) {
+ ALOGE("Failed to stat %s", fullPath.c_str());
+ return;
+ }
+
+ // Open the file so we can read its header
+ int fd = open(fullPath.c_str(), O_RDONLY);
+ if (fd == -1) {
+ ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+ std::strerror(errno));
+ return;
+ }
+
+ // Look up the details we track about each file
+ size_t valueSize = getValueSizeFromFile(fd, fullPath);
+ size_t fileSize = st.st_size;
+ time_t accessTime = st.st_atime;
+
+ // If the cache entry is damaged or no good, remove it
+ // TODO: Perform any other checks
+ if (valueSize <= 0 || fileSize <= 0 || accessTime <= 0) {
+ if (remove(fullPath.c_str()) != 0) {
+ ALOGE("Error removing %s: %s", fullPath.c_str(), std::strerror(errno));
+ }
+ continue;
+ }
+
+ // Track details for rapid lookup later
+ trackEntry(entryHash, valueSize, fileSize, accessTime);
+
+ // Track the total size
+ increaseTotalCacheSize(fileSize);
+
+ // Preload the entry for fast retrieval
+ if ((mHotCacheSize + fileSize) < mHotCacheLimit) {
+ // Memory map the file
+ uint8_t* mappedEntry = reinterpret_cast<uint8_t*>(
+ mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+ if (mappedEntry == MAP_FAILED) {
+ ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+ close(fd);
+ continue;
+ }
+
+ ALOGV("INIT: Populating hot cache with fd = %i, cacheEntry = %p for "
+ "entryHash %u",
+ fd, mappedEntry, entryHash);
+
+ // Track the details of the preload so they can be retrieved later
+ if (!addToHotCache(entryHash, fd, mappedEntry, fileSize)) {
+ ALOGE("INIT Failed to add %u to hot cache", entryHash);
+ munmap(mappedEntry, fileSize);
+ close(fd);
+ return;
+ }
+ } else {
+ close(fd);
+ }
+ }
+ closedir(dir);
+ } else {
+ ALOGE("Unable to open filename: %s", mMultifileDirName.c_str());
+ }
+ } else {
+ // If the multifile directory does not exist, create it and start from scratch
+ if (mkdir(mMultifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
+ ALOGE("Unable to create directory (%s), errno (%i)", mMultifileDirName.c_str(), errno);
+ }
+ }
+
+ mTaskThread = std::thread(&MultifileBlobCache::processTasks, this);
+
+ mInitialized = true;
+}
+
+MultifileBlobCache::~MultifileBlobCache() {
+ // Inform the worker thread we're done
+ ALOGV("DESCTRUCTOR: Shutting down worker thread");
+ DeferredTask task(TaskCommand::Exit);
+ queueTask(std::move(task));
+
+ // Wait for it to complete
+ ALOGV("DESCTRUCTOR: Waiting for worker thread to complete");
+ waitForWorkComplete();
+ mTaskThread.join();
+}
+
+// Set will add the entry to hot cache and start a deferred process to write it to disk
+void MultifileBlobCache::set(const void* key, EGLsizeiANDROID keySize, const void* value,
+ EGLsizeiANDROID valueSize) {
+ if (!mInitialized) {
+ return;
+ }
+
+ // Ensure key and value are under their limits
+ if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+ ALOGV("SET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+ valueSize, mMaxValueSize);
+ return;
+ }
+
+ // Generate a hash of the key and use it to track this entry
+ uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+ size_t fileSize = sizeof(MultifileHeader) + keySize + valueSize;
+
+ // If we're going to be over the cache limit, kick off a trim to clear space
+ if (getTotalSize() + fileSize > mMaxTotalSize) {
+ ALOGV("SET: Cache is full, calling trimCache to clear space");
+ trimCache(mMaxTotalSize);
+ }
+
+ ALOGV("SET: Add %u to cache", entryHash);
+
+ uint8_t* buffer = new uint8_t[fileSize];
+
+ // Write the key and value after the header
+ android::MultifileHeader header = {keySize, valueSize};
+ memcpy(static_cast<void*>(buffer), static_cast<const void*>(&header),
+ sizeof(android::MultifileHeader));
+ memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader)), static_cast<const void*>(key),
+ keySize);
+ memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader) + keySize),
+ static_cast<const void*>(value), valueSize);
+
+ std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+ // Track the size and access time for quick recall
+ trackEntry(entryHash, valueSize, fileSize, time(0));
+
+ // Update the overall cache size
+ increaseTotalCacheSize(fileSize);
+
+ // Keep the entry in hot cache for quick retrieval
+ ALOGV("SET: Adding %u to hot cache.", entryHash);
+
+ // Sending -1 as the fd indicates we don't have an fd for this
+ if (!addToHotCache(entryHash, -1, buffer, fileSize)) {
+ ALOGE("GET: Failed to add %u to hot cache", entryHash);
+ return;
+ }
+
+ // Track that we're creating a pending write for this entry
+ // Include the buffer to handle the case when multiple writes are pending for an entry
+ mDeferredWrites.insert(std::make_pair(entryHash, buffer));
+
+ // Create deferred task to write to storage
+ ALOGV("SET: Adding task to queue.");
+ DeferredTask task(TaskCommand::WriteToDisk);
+ task.initWriteToDisk(fullPath, buffer, fileSize);
+ queueTask(std::move(task));
+}
+
+// Get will check the hot cache, then load it from disk if needed
+EGLsizeiANDROID MultifileBlobCache::get(const void* key, EGLsizeiANDROID keySize, void* value,
+ EGLsizeiANDROID valueSize) {
+ if (!mInitialized) {
+ return 0;
+ }
+
+ // Ensure key and value are under their limits
+ if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+ ALOGV("GET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+ valueSize, mMaxValueSize);
+ return 0;
+ }
+
+ // Generate a hash of the key and use it to track this entry
+ uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+ // See if we have this file
+ if (!contains(entryHash)) {
+ ALOGV("GET: Cache MISS - cache does not contain entry: %u", entryHash);
+ return 0;
+ }
+
+ // Look up the data for this entry
+ MultifileEntryStats entryStats = getEntryStats(entryHash);
+
+ size_t cachedValueSize = entryStats.valueSize;
+ if (cachedValueSize > valueSize) {
+ ALOGV("GET: Cache MISS - valueSize not large enough (%lu) for entry %u, returning required"
+ "size (%zu)",
+ valueSize, entryHash, cachedValueSize);
+ return cachedValueSize;
+ }
+
+ // We have the file and have enough room to write it out, return the entry
+ ALOGV("GET: Cache HIT - cache contains entry: %u", entryHash);
+
+ // Look up the size of the file
+ size_t fileSize = entryStats.fileSize;
+ if (keySize > fileSize) {
+ ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
+ "file",
+ keySize, fileSize);
+ return 0;
+ }
+
+ std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+ // Open the hashed filename path
+ uint8_t* cacheEntry = 0;
+
+ // Check hot cache
+ if (mHotCache.find(entryHash) != mHotCache.end()) {
+ ALOGV("GET: HotCache HIT for entry %u", entryHash);
+ cacheEntry = mHotCache[entryHash].entryBuffer;
+ } else {
+ ALOGV("GET: HotCache MISS for entry: %u", entryHash);
+
+ if (mDeferredWrites.find(entryHash) != mDeferredWrites.end()) {
+ // Wait for writes to complete if there is an outstanding write for this entry
+ ALOGV("GET: Waiting for write to complete for %u", entryHash);
+ waitForWorkComplete();
+ }
+
+ // Open the entry file
+ int fd = open(fullPath.c_str(), O_RDONLY);
+ if (fd == -1) {
+ ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+ std::strerror(errno));
+ return 0;
+ }
+
+ // Memory map the file
+ cacheEntry =
+ reinterpret_cast<uint8_t*>(mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+ if (cacheEntry == MAP_FAILED) {
+ ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+ close(fd);
+ return 0;
+ }
+
+ ALOGV("GET: Adding %u to hot cache", entryHash);
+ if (!addToHotCache(entryHash, fd, cacheEntry, fileSize)) {
+ ALOGE("GET: Failed to add %u to hot cache", entryHash);
+ return 0;
+ }
+
+ cacheEntry = mHotCache[entryHash].entryBuffer;
+ }
+
+ // Ensure the header matches
+ MultifileHeader* header = reinterpret_cast<MultifileHeader*>(cacheEntry);
+ if (header->keySize != keySize || header->valueSize != valueSize) {
+ ALOGW("Mismatch on keySize(%ld vs. cached %ld) or valueSize(%ld vs. cached %ld) compared "
+ "to cache header values for fullPath: %s",
+ keySize, header->keySize, valueSize, header->valueSize, fullPath.c_str());
+ removeFromHotCache(entryHash);
+ return 0;
+ }
+
+ // Compare the incoming key with our stored version (the beginning of the entry)
+ uint8_t* cachedKey = cacheEntry + sizeof(MultifileHeader);
+ int compare = memcmp(cachedKey, key, keySize);
+ if (compare != 0) {
+ ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
+ removeFromHotCache(entryHash);
+ return 0;
+ }
+
+ // Remaining entry following the key is the value
+ uint8_t* cachedValue = cacheEntry + (keySize + sizeof(MultifileHeader));
+ memcpy(value, cachedValue, cachedValueSize);
+
+ return cachedValueSize;
+}
+
+void MultifileBlobCache::finish() {
+ // Wait for all deferred writes to complete
+ ALOGV("FINISH: Waiting for work to complete.");
+ waitForWorkComplete();
+
+ // Close all entries in the hot cache
+ for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+ uint32_t entryHash = hotCacheIter->first;
+ MultifileHotCache entry = hotCacheIter->second;
+
+ ALOGV("FINISH: Closing hot cache entry for %u", entryHash);
+ freeHotCacheEntry(entry);
+
+ mHotCache.erase(hotCacheIter++);
+ }
+}
+
+void MultifileBlobCache::trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+ time_t accessTime) {
+ mEntries.insert(entryHash);
+ mEntryStats[entryHash] = {valueSize, fileSize, accessTime};
+}
+
+bool MultifileBlobCache::contains(uint32_t hashEntry) const {
+ return mEntries.find(hashEntry) != mEntries.end();
+}
+
+MultifileEntryStats MultifileBlobCache::getEntryStats(uint32_t entryHash) {
+ return mEntryStats[entryHash];
+}
+
+void MultifileBlobCache::increaseTotalCacheSize(size_t fileSize) {
+ mTotalCacheSize += fileSize;
+}
+
+void MultifileBlobCache::decreaseTotalCacheSize(size_t fileSize) {
+ mTotalCacheSize -= fileSize;
+}
+
+bool MultifileBlobCache::addToHotCache(uint32_t newEntryHash, int newFd, uint8_t* newEntryBuffer,
+ size_t newEntrySize) {
+ ALOGV("HOTCACHE(ADD): Adding %u to hot cache", newEntryHash);
+
+ // Clear space if we need to
+ if ((mHotCacheSize + newEntrySize) > mHotCacheLimit) {
+ ALOGV("HOTCACHE(ADD): mHotCacheSize (%zu) + newEntrySize (%zu) is to big for "
+ "mHotCacheLimit "
+ "(%zu), freeing up space for %u",
+ mHotCacheSize, newEntrySize, mHotCacheLimit, newEntryHash);
+
+ // Wait for all the files to complete writing so our hot cache is accurate
+ waitForWorkComplete();
+
+ // Free up old entries until under the limit
+ for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+ uint32_t oldEntryHash = hotCacheIter->first;
+ MultifileHotCache oldEntry = hotCacheIter->second;
+
+ // Move our iterator before deleting the entry
+ hotCacheIter++;
+ if (!removeFromHotCache(oldEntryHash)) {
+ ALOGE("HOTCACHE(ADD): Unable to remove entry %u", oldEntryHash);
+ return false;
+ }
+
+ // Clear at least half the hot cache
+ if ((mHotCacheSize + newEntrySize) <= mHotCacheLimit / 2) {
+ ALOGV("HOTCACHE(ADD): Freed enough space for %zu", mHotCacheSize);
+ break;
+ }
+ }
+ }
+
+ // Track it
+ mHotCache[newEntryHash] = {newFd, newEntryBuffer, newEntrySize};
+ mHotCacheSize += newEntrySize;
+
+ ALOGV("HOTCACHE(ADD): New hot cache size: %zu", mHotCacheSize);
+
+ return true;
+}
+
+bool MultifileBlobCache::removeFromHotCache(uint32_t entryHash) {
+ if (mHotCache.find(entryHash) != mHotCache.end()) {
+ ALOGV("HOTCACHE(REMOVE): Removing %u from hot cache", entryHash);
+
+ // Wait for all the files to complete writing so our hot cache is accurate
+ waitForWorkComplete();
+
+ ALOGV("HOTCACHE(REMOVE): Closing hot cache entry for %u", entryHash);
+ MultifileHotCache entry = mHotCache[entryHash];
+ freeHotCacheEntry(entry);
+
+ // Delete the entry from our tracking
+ mHotCacheSize -= entry.entrySize;
+ size_t count = mHotCache.erase(entryHash);
+
+ return true;
+ }
+
+ return false;
+}
+
+bool MultifileBlobCache::applyLRU(size_t cacheLimit) {
+ // Walk through our map of sorted last access times and remove files until under the limit
+ for (auto cacheEntryIter = mEntryStats.begin(); cacheEntryIter != mEntryStats.end();) {
+ uint32_t entryHash = cacheEntryIter->first;
+
+ ALOGV("LRU: Removing entryHash %u", entryHash);
+
+ // Track the overall size
+ MultifileEntryStats entryStats = getEntryStats(entryHash);
+ decreaseTotalCacheSize(entryStats.fileSize);
+
+ // Remove it from hot cache if present
+ removeFromHotCache(entryHash);
+
+ // Remove it from the system
+ std::string entryPath = mMultifileDirName + "/" + std::to_string(entryHash);
+ if (remove(entryPath.c_str()) != 0) {
+ ALOGE("LRU: Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
+ return false;
+ }
+
+ // Increment the iterator before clearing the entry
+ cacheEntryIter++;
+
+ // Delete the entry from our tracking
+ size_t count = mEntryStats.erase(entryHash);
+ if (count != 1) {
+ ALOGE("LRU: Failed to remove entryHash (%u) from mEntryStats", entryHash);
+ return false;
+ }
+
+ // See if it has been reduced enough
+ size_t totalCacheSize = getTotalSize();
+ if (totalCacheSize <= cacheLimit) {
+ // Success
+ ALOGV("LRU: Reduced cache to %zu", totalCacheSize);
+ return true;
+ }
+ }
+
+ ALOGV("LRU: Cache is emptry");
+ return false;
+}
+
+// When removing files, what fraction of the overall limit should be reached when removing files
+// A divisor of two will decrease the cache to 50%, four to 25% and so on
+constexpr uint32_t kCacheLimitDivisor = 2;
+
+// Calculate the cache size and remove old entries until under the limit
+void MultifileBlobCache::trimCache(size_t cacheByteLimit) {
+ // Start with the value provided by egl_cache
+ size_t limit = cacheByteLimit;
+
+ // Wait for all deferred writes to complete
+ waitForWorkComplete();
+
+ size_t size = getTotalSize();
+
+ // If size is larger than the threshold, remove files using LRU
+ if (size > limit) {
+ ALOGV("TRIM: Multifile cache size is larger than %zu, removing old entries",
+ cacheByteLimit);
+ if (!applyLRU(limit / kCacheLimitDivisor)) {
+ ALOGE("Error when clearing multifile shader cache");
+ return;
+ }
+ }
+}
+
+// This function performs a task. It only knows how to write files to disk,
+// but it could be expanded if needed.
+void MultifileBlobCache::processTask(DeferredTask& task) {
+ switch (task.getTaskCommand()) {
+ case TaskCommand::Exit: {
+ ALOGV("DEFERRED: Shutting down");
+ return;
+ }
+ case TaskCommand::WriteToDisk: {
+ uint32_t entryHash = task.getEntryHash();
+ std::string& fullPath = task.getFullPath();
+ uint8_t* buffer = task.getBuffer();
+ size_t bufferSize = task.getBufferSize();
+
+ // Create the file or reset it if already present, read+write for user only
+ int fd = open(fullPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+ if (fd == -1) {
+ ALOGE("Cache error in SET - failed to open fullPath: %s, error: %s",
+ fullPath.c_str(), std::strerror(errno));
+ return;
+ }
+
+ ALOGV("DEFERRED: Opened fd %i from %s", fd, fullPath.c_str());
+
+ ssize_t result = write(fd, buffer, bufferSize);
+ if (result != bufferSize) {
+ ALOGE("Error writing fileSize to cache entry (%s): %s", fullPath.c_str(),
+ std::strerror(errno));
+ return;
+ }
+
+ ALOGV("DEFERRED: Completed write for: %s", fullPath.c_str());
+ close(fd);
+
+ // Erase the entry from mDeferredWrites
+ // Since there could be multiple outstanding writes for an entry, find the matching one
+ typedef std::multimap<uint32_t, uint8_t*>::iterator entryIter;
+ std::pair<entryIter, entryIter> iterPair = mDeferredWrites.equal_range(entryHash);
+ for (entryIter it = iterPair.first; it != iterPair.second; ++it) {
+ if (it->second == buffer) {
+ ALOGV("DEFERRED: Marking write complete for %u at %p", it->first, it->second);
+ mDeferredWrites.erase(it);
+ break;
+ }
+ }
+
+ return;
+ }
+ default: {
+ ALOGE("DEFERRED: Unhandled task type");
+ return;
+ }
+ }
+}
+
+// This function will wait until tasks arrive, then execute them
+// If the exit command is submitted, the loop will terminate
+void MultifileBlobCache::processTasksImpl(bool* exitThread) {
+ while (true) {
+ std::unique_lock<std::mutex> lock(mWorkerMutex);
+ if (mTasks.empty()) {
+ ALOGV("WORKER: No tasks available, waiting");
+ mWorkerThreadIdle = true;
+ mWorkerIdleCondition.notify_all();
+ // Only wake if notified and command queue is not empty
+ mWorkAvailableCondition.wait(lock, [this] { return !mTasks.empty(); });
+ }
+
+ ALOGV("WORKER: Task available, waking up.");
+ mWorkerThreadIdle = false;
+ DeferredTask task = std::move(mTasks.front());
+ mTasks.pop();
+
+ if (task.getTaskCommand() == TaskCommand::Exit) {
+ ALOGV("WORKER: Exiting work loop.");
+ *exitThread = true;
+ mWorkerThreadIdle = true;
+ mWorkerIdleCondition.notify_one();
+ return;
+ }
+
+ lock.unlock();
+ processTask(task);
+ }
+}
+
+// Process tasks until the exit task is submitted
+void MultifileBlobCache::processTasks() {
+ while (true) {
+ bool exitThread = false;
+ processTasksImpl(&exitThread);
+ if (exitThread) {
+ break;
+ }
+ }
+}
+
+// Add a task to the queue to be processed by the worker thread
+void MultifileBlobCache::queueTask(DeferredTask&& task) {
+ std::lock_guard<std::mutex> queueLock(mWorkerMutex);
+ mTasks.emplace(std::move(task));
+ mWorkAvailableCondition.notify_one();
+}
+
+// Wait until all tasks have been completed
+void MultifileBlobCache::waitForWorkComplete() {
+ std::unique_lock<std::mutex> lock(mWorkerMutex);
+ mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
+}
+
+}; // namespace android
\ No newline at end of file
diff --git a/opengl/libs/EGL/MultifileBlobCache.h b/opengl/libs/EGL/MultifileBlobCache.h
new file mode 100644
index 0000000..dcdfe47
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.h
@@ -0,0 +1,164 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MULTIFILE_BLOB_CACHE_H
+#define ANDROID_MULTIFILE_BLOB_CACHE_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <future>
+#include <map>
+#include <queue>
+#include <string>
+#include <thread>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+
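+// Each cache file on disk begins with a MultifileHeader, followed by the key bytes
+// and then the value bytes.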
+struct MultifileHeader {
+ EGLsizeiANDROID keySize;
+ EGLsizeiANDROID valueSize;
+};
+
+struct MultifileEntryStats {
+ EGLsizeiANDROID valueSize;
+ size_t fileSize;
+ time_t accessTime;
+};
+
+struct MultifileHotCache {
+ int entryFd;
+ uint8_t* entryBuffer;
+ size_t entrySize;
+};
+
+enum class TaskCommand {
+ Invalid = 0,
+ WriteToDisk,
+ Exit,
+};
+
+class DeferredTask {
+public:
+ DeferredTask(TaskCommand command) : mCommand(command) {}
+
+ TaskCommand getTaskCommand() { return mCommand; }
+
+ void initWriteToDisk(std::string fullPath, uint8_t* buffer, size_t bufferSize) {
+ mCommand = TaskCommand::WriteToDisk;
+ mFullPath = fullPath;
+ mBuffer = buffer;
+ mBufferSize = bufferSize;
+ }
+
+ uint32_t getEntryHash() { return mEntryHash; }
+ std::string& getFullPath() { return mFullPath; }
+ uint8_t* getBuffer() { return mBuffer; }
+ size_t getBufferSize() { return mBufferSize; };
+
+private:
+ TaskCommand mCommand;
+
+ // Parameters for WriteToDisk
+ uint32_t mEntryHash;
+ std::string mFullPath;
+ uint8_t* mBuffer;
+ size_t mBufferSize;
+};
+
+class MultifileBlobCache {
+public:
+ MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize, const std::string& baseDir);
+ ~MultifileBlobCache();
+
+ void set(const void* key, EGLsizeiANDROID keySize, const void* value,
+ EGLsizeiANDROID valueSize);
+ EGLsizeiANDROID get(const void* key, EGLsizeiANDROID keySize, void* value,
+ EGLsizeiANDROID valueSize);
+
+ void finish();
+
+ size_t getTotalSize() const { return mTotalCacheSize; }
+ void trimCache(size_t cacheByteLimit);
+
+private:
+ void trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+ time_t accessTime);
+ bool contains(uint32_t entryHash) const;
+ bool removeEntry(uint32_t entryHash);
+ MultifileEntryStats getEntryStats(uint32_t entryHash);
+
+ size_t getFileSize(uint32_t entryHash);
+ size_t getValueSize(uint32_t entryHash);
+
+ void increaseTotalCacheSize(size_t fileSize);
+ void decreaseTotalCacheSize(size_t fileSize);
+
+ bool addToHotCache(uint32_t entryHash, int fd, uint8_t* entryBuffer, size_t entrySize);
+ bool removeFromHotCache(uint32_t entryHash);
+
+ bool applyLRU(size_t cacheLimit);
+
+ bool mInitialized;
+ std::string mMultifileDirName;
+
+ std::unordered_set<uint32_t> mEntries;
+ std::unordered_map<uint32_t, MultifileEntryStats> mEntryStats;
+ std::unordered_map<uint32_t, MultifileHotCache> mHotCache;
+
+ size_t mMaxKeySize;
+ size_t mMaxValueSize;
+ size_t mMaxTotalSize;
+ size_t mTotalCacheSize;
+ size_t mHotCacheLimit;
+ size_t mHotCacheEntryLimit;
+ size_t mHotCacheSize;
+
+ // Below are the components used to allow a deferred write
+
+ // Track whether we have pending writes for an entry
+ std::multimap<uint32_t, uint8_t*> mDeferredWrites;
+
+ // Functions to work through tasks in the queue
+ void processTasks();
+ void processTasksImpl(bool* exitThread);
+ void processTask(DeferredTask& task);
+
+ // Used by main thread to create work for the worker thread
+ void queueTask(DeferredTask&& task);
+
+ // Used by main thread to wait for worker thread to complete all outstanding work.
+ void waitForWorkComplete();
+
+ std::thread mTaskThread;
+ std::queue<DeferredTask> mTasks;
+ std::mutex mWorkerMutex;
+
+ // This condition will block the worker thread until a task is queued
+ std::condition_variable mWorkAvailableCondition;
+
+ // This condition will block the main thread while the worker thread still has tasks
+ std::condition_variable mWorkerIdleCondition;
+
+ // This bool will track whether all tasks have been completed
+ bool mWorkerThreadIdle;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MULTIFILE_BLOB_CACHE_H
diff --git a/opengl/libs/EGL/MultifileBlobCache_test.cpp b/opengl/libs/EGL/MultifileBlobCache_test.cpp
new file mode 100644
index 0000000..1a55a4f
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache_test.cpp
@@ -0,0 +1,200 @@
+/*
+ ** Copyright 2023, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/test_utils.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <stdio.h>
+
+#include <memory>
+
+namespace android {
+
+template <typename T>
+using sp = std::shared_ptr<T>;
+
+constexpr size_t kMaxTotalSize = 32 * 1024;
+constexpr size_t kMaxPreloadSize = 8 * 1024;
+
+constexpr size_t kMaxKeySize = kMaxPreloadSize / 4;
+constexpr size_t kMaxValueSize = kMaxPreloadSize / 2;
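+// These mirror the per-entry limits MultifileBlobCache derives from the hot cache
+// size: keys up to 1/4 of the preload limit, values up to 1/2.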
+
+class MultifileBlobCacheTest : public ::testing::Test {
+protected:
+ virtual void SetUp() {
+ mTempFile.reset(new TemporaryFile());
+ mMBC.reset(new MultifileBlobCache(kMaxTotalSize, kMaxPreloadSize, &mTempFile->path[0]));
+ }
+
+ virtual void TearDown() { mMBC.reset(); }
+
+ std::unique_ptr<TemporaryFile> mTempFile;
+ std::unique_ptr<MultifileBlobCache> mMBC;
+};
+
+TEST_F(MultifileBlobCacheTest, CacheSingleValueSucceeds) {
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+ ASSERT_EQ('g', buf[2]);
+ ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheTwoValuesSucceeds) {
+ unsigned char buf[2] = {0xee, 0xee};
+ mMBC->set("ab", 2, "cd", 2);
+ mMBC->set("ef", 2, "gh", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('c', buf[0]);
+ ASSERT_EQ('d', buf[1]);
+ ASSERT_EQ(size_t(2), mMBC->get("ef", 2, buf, 2));
+ ASSERT_EQ('g', buf[0]);
+ ASSERT_EQ('h', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetSetTwiceSucceeds) {
+ unsigned char buf[2] = {0xee, 0xee};
+ mMBC->set("ab", 2, "cd", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('c', buf[0]);
+ ASSERT_EQ('d', buf[1]);
+ // Use the same key, but different value
+ mMBC->set("ab", 2, "ef", 2);
+ ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesInsideBounds) {
+ unsigned char buf[6] = {0xee, 0xee, 0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf + 1, 4));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ('e', buf[1]);
+ ASSERT_EQ('f', buf[2]);
+ ASSERT_EQ('g', buf[3]);
+ ASSERT_EQ('h', buf[4]);
+ ASSERT_EQ(0xee, buf[5]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesIfBufferIsLargeEnough) {
+ unsigned char buf[3] = {0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 3));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ(0xee, buf[1]);
+ ASSERT_EQ(0xee, buf[2]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetDoesntAccessNullBuffer) {
+ mMBC->set("abcd", 4, "efgh", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, nullptr, 0));
+}
+
+TEST_F(MultifileBlobCacheTest, MultipleSetsCacheLatestValue) {
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ mMBC->set("abcd", 4, "ijkl", 4);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('i', buf[0]);
+ ASSERT_EQ('j', buf[1]);
+ ASSERT_EQ('k', buf[2]);
+ ASSERT_EQ('l', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, SecondSetKeepsFirstValueIfTooLarge) {
+ unsigned char buf[kMaxValueSize + 1] = {0xee, 0xee, 0xee, 0xee};
+ mMBC->set("abcd", 4, "efgh", 4);
+ mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+ ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+ ASSERT_EQ('e', buf[0]);
+ ASSERT_EQ('f', buf[1]);
+ ASSERT_EQ('g', buf[2]);
+ ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfKeyIsTooBig) {
+ char key[kMaxKeySize + 1];
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ for (int i = 0; i < kMaxKeySize + 1; i++) {
+ key[i] = 'a';
+ }
+ mMBC->set(key, kMaxKeySize + 1, "bbbb", 4);
+ ASSERT_EQ(size_t(0), mMBC->get(key, kMaxKeySize + 1, buf, 4));
+ ASSERT_EQ(0xee, buf[0]);
+ ASSERT_EQ(0xee, buf[1]);
+ ASSERT_EQ(0xee, buf[2]);
+ ASSERT_EQ(0xee, buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfValueIsTooBig) {
+ char buf[kMaxValueSize + 1];
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ buf[i] = 'b';
+ }
+ mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ buf[i] = 0xee;
+ }
+ ASSERT_EQ(size_t(0), mMBC->get("abcd", 4, buf, kMaxValueSize + 1));
+ for (int i = 0; i < kMaxValueSize + 1; i++) {
+ SCOPED_TRACE(i);
+ ASSERT_EQ(0xee, buf[i]);
+ }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxKeySizeSucceeds) {
+ char key[kMaxKeySize];
+ unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+ for (int i = 0; i < kMaxKeySize; i++) {
+ key[i] = 'a';
+ }
+ mMBC->set(key, kMaxKeySize, "wxyz", 4);
+ ASSERT_EQ(size_t(4), mMBC->get(key, kMaxKeySize, buf, 4));
+ ASSERT_EQ('w', buf[0]);
+ ASSERT_EQ('x', buf[1]);
+ ASSERT_EQ('y', buf[2]);
+ ASSERT_EQ('z', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxValueSizeSucceeds) {
+ char buf[kMaxValueSize];
+ for (int i = 0; i < kMaxValueSize; i++) {
+ buf[i] = 'b';
+ }
+ mMBC->set("abcd", 4, buf, kMaxValueSize);
+ for (int i = 0; i < kMaxValueSize; i++) {
+ buf[i] = 0xee;
+ }
+ mMBC->get("abcd", 4, buf, kMaxValueSize);
+ for (int i = 0; i < kMaxValueSize; i++) {
+ SCOPED_TRACE(i);
+ ASSERT_EQ('b', buf[i]);
+ }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMinKeyAndValueSizeSucceeds) {
+ unsigned char buf[1] = {0xee};
+ mMBC->set("x", 1, "y", 1);
+ ASSERT_EQ(size_t(1), mMBC->get("x", 1, buf, 1));
+ ASSERT_EQ('y', buf[0]);
+}
+
+} // namespace android
diff --git a/opengl/libs/EGL/egl_cache.cpp b/opengl/libs/EGL/egl_cache.cpp
index 1e8a348..b00ee33 100644
--- a/opengl/libs/EGL/egl_cache.cpp
+++ b/opengl/libs/EGL/egl_cache.cpp
@@ -14,6 +14,8 @@
** limitations under the License.
*/
+// #define LOG_NDEBUG 0
+
#include "egl_cache.h"
#include <android-base/properties.h>
@@ -25,22 +27,19 @@
#include <thread>
#include "../egl_impl.h"
-#include "egl_cache_multifile.h"
#include "egl_display.h"
// Monolithic cache size limits.
-static const size_t maxKeySize = 12 * 1024;
-static const size_t maxValueSize = 64 * 1024;
-static const size_t maxTotalSize = 32 * 1024 * 1024;
+static const size_t kMaxMonolithicKeySize = 12 * 1024;
+static const size_t kMaxMonolithicValueSize = 64 * 1024;
+static const size_t kMaxMonolithicTotalSize = 2 * 1024 * 1024;
// The time in seconds to wait before saving newly inserted monolithic cache entries.
-static const unsigned int deferredSaveDelay = 4;
+static const unsigned int kDeferredMonolithicSaveDelay = 4;
-// Multifile cache size limit
-constexpr size_t kMultifileCacheByteLimit = 64 * 1024 * 1024;
-
-// Delay before cleaning up multifile cache entries
-static const unsigned int deferredMultifileCleanupDelaySeconds = 1;
+// Multifile cache size limits
+constexpr uint32_t kMultifileHotCacheLimit = 8 * 1024 * 1024;
+constexpr uint32_t kMultifileCacheByteLimit = 32 * 1024 * 1024;
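+// The hot cache limit bounds how much of the cache is kept in memory for fast
+// retrieval; the byte limit is the default on-disk size before trimming.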
namespace android {
@@ -68,10 +67,7 @@
// egl_cache_t definition
//
egl_cache_t::egl_cache_t()
- : mInitialized(false),
- mMultifileMode(false),
- mCacheByteLimit(maxTotalSize),
- mMultifileCleanupPending(false) {}
+ : mInitialized(false), mMultifileMode(false), mCacheByteLimit(kMaxMonolithicTotalSize) {}
egl_cache_t::~egl_cache_t() {}
@@ -85,7 +81,7 @@
std::lock_guard<std::mutex> lock(mMutex);
egl_connection_t* const cnx = &gEGLImpl;
- if (cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
+ if (display && cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
const char* exts = display->disp.queryString.extensions;
size_t bcExtLen = strlen(BC_EXT_STR);
size_t extsLen = strlen(exts);
@@ -114,14 +110,36 @@
}
}
- // Allow forcing monolithic cache for debug purposes
- if (base::GetProperty("debug.egl.blobcache.multifilemode", "") == "false") {
- ALOGD("Forcing monolithic cache due to debug.egl.blobcache.multifilemode == \"false\"");
+ // Check the device config to decide whether multifile should be used
+ if (base::GetBoolProperty("ro.egl.blobcache.multifile", false)) {
+ mMultifileMode = true;
+ ALOGV("Using multifile EGL blobcache");
+ }
+
+ // Allow forcing the mode for debug purposes
+ std::string mode = base::GetProperty("debug.egl.blobcache.multifile", "");
+ if (mode == "true") {
+ ALOGV("Forcing multifile cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
+ mMultifileMode = true;
+ } else if (mode == "false") {
+ ALOGV("Forcing monolithic cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
mMultifileMode = false;
}
if (mMultifileMode) {
- mCacheByteLimit = kMultifileCacheByteLimit;
+ mCacheByteLimit = static_cast<size_t>(
+ base::GetUintProperty<uint32_t>("ro.egl.blobcache.multifile_limit",
+ kMultifileCacheByteLimit));
+
+ // Check for a debug value
+ int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.multifile_limit", -1);
+ if (debugCacheSize >= 0) {
+ ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.multifile_limit",
+ mCacheByteLimit, debugCacheSize);
+ mCacheByteLimit = debugCacheSize;
+ }
+
+ ALOGV("Using multifile EGL blobcache limit of %zu bytes", mCacheByteLimit);
}
mInitialized = true;
@@ -133,10 +151,10 @@
mBlobCache->writeToFile();
}
mBlobCache = nullptr;
- if (mMultifileMode) {
- checkMultifileCacheSize(mCacheByteLimit);
+ if (mMultifileBlobCache) {
+ mMultifileBlobCache->finish();
}
- mMultifileMode = false;
+ mMultifileBlobCache = nullptr;
mInitialized = false;
}
@@ -151,20 +169,8 @@
if (mInitialized) {
if (mMultifileMode) {
- setBlobMultifile(key, keySize, value, valueSize, mFilename);
-
- if (!mMultifileCleanupPending) {
- mMultifileCleanupPending = true;
- // Kick off a thread to cull cache files below limit
- std::thread deferredMultifileCleanupThread([this]() {
- sleep(deferredMultifileCleanupDelaySeconds);
- std::lock_guard<std::mutex> lock(mMutex);
- // Check the size of cache and remove entries to stay under limit
- checkMultifileCacheSize(mCacheByteLimit);
- mMultifileCleanupPending = false;
- });
- deferredMultifileCleanupThread.detach();
- }
+ MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+ mbc->set(key, keySize, value, valueSize);
} else {
BlobCache* bc = getBlobCacheLocked();
bc->set(key, keySize, value, valueSize);
@@ -172,7 +178,7 @@
if (!mSavePending) {
mSavePending = true;
std::thread deferredSaveThread([this]() {
- sleep(deferredSaveDelay);
+ sleep(kDeferredMonolithicSaveDelay);
std::lock_guard<std::mutex> lock(mMutex);
if (mInitialized && mBlobCache) {
mBlobCache->writeToFile();
@@ -196,15 +202,21 @@
if (mInitialized) {
if (mMultifileMode) {
- return getBlobMultifile(key, keySize, value, valueSize, mFilename);
+ MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+ return mbc->get(key, keySize, value, valueSize);
} else {
BlobCache* bc = getBlobCacheLocked();
return bc->get(key, keySize, value, valueSize);
}
}
+
return 0;
}
+void egl_cache_t::setCacheMode(EGLCacheMode cacheMode) {
+ mMultifileMode = (cacheMode == EGLCacheMode::Multifile);
+}
+
void egl_cache_t::setCacheFilename(const char* filename) {
std::lock_guard<std::mutex> lock(mMutex);
mFilename = filename;
@@ -216,7 +228,7 @@
if (!mMultifileMode) {
// If we're not in multifile mode, ensure the cache limit is only being lowered,
// not increasing above the hard coded platform limit
- if (cacheByteLimit > maxTotalSize) {
+ if (cacheByteLimit > kMaxMonolithicTotalSize) {
return;
}
}
@@ -226,8 +238,8 @@
size_t egl_cache_t::getCacheSize() {
std::lock_guard<std::mutex> lock(mMutex);
- if (mMultifileMode) {
- return getMultifileCacheSize();
+ if (mMultifileBlobCache) {
+ return mMultifileBlobCache->getTotalSize();
}
if (mBlobCache) {
return mBlobCache->getSize();
@@ -237,9 +249,18 @@
BlobCache* egl_cache_t::getBlobCacheLocked() {
if (mBlobCache == nullptr) {
- mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, mCacheByteLimit, mFilename));
+ mBlobCache.reset(new FileBlobCache(kMaxMonolithicKeySize, kMaxMonolithicValueSize,
+ mCacheByteLimit, mFilename));
}
return mBlobCache.get();
}
+MultifileBlobCache* egl_cache_t::getMultifileBlobCacheLocked() {
+ if (mMultifileBlobCache == nullptr) {
+ mMultifileBlobCache.reset(
+ new MultifileBlobCache(mCacheByteLimit, kMultifileHotCacheLimit, mFilename));
+ }
+ return mMultifileBlobCache.get();
+}
+
}; // namespace android
diff --git a/opengl/libs/EGL/egl_cache.h b/opengl/libs/EGL/egl_cache.h
index 2dcd803..1399368 100644
--- a/opengl/libs/EGL/egl_cache.h
+++ b/opengl/libs/EGL/egl_cache.h
@@ -25,6 +25,7 @@
#include <string>
#include "FileBlobCache.h"
+#include "MultifileBlobCache.h"
namespace android {
@@ -32,6 +33,11 @@
class EGLAPI egl_cache_t {
public:
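+ // Selects whether the cache is backed by the monolithic FileBlobCache or the
+ // MultifileBlobCache; see setCacheMode().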
+ enum class EGLCacheMode {
+ Monolithic,
+ Multifile,
+ };
+
// get returns a pointer to the singleton egl_cache_t object. This
// singleton object will never be destroyed.
static egl_cache_t* get();
@@ -64,6 +70,9 @@
// cache contents from one program invocation to another.
void setCacheFilename(const char* filename);
+ // Allow setting monolithic or multifile modes
+ void setCacheMode(EGLCacheMode cacheMode);
+
// Allow the fixed cache limit to be overridden
void setCacheLimit(int64_t cacheByteLimit);
@@ -85,6 +94,9 @@
// possible.
BlobCache* getBlobCacheLocked();
+ // Get or create the multifile blobcache
+ MultifileBlobCache* getMultifileBlobCacheLocked();
+
// mInitialized indicates whether the egl_cache_t is in the initialized
// state. It is initialized to false at construction time, and gets set to
// true when initialize is called. It is set back to false when terminate
@@ -98,6 +110,9 @@
// first time it's needed.
std::unique_ptr<FileBlobCache> mBlobCache;
+ // The multifile version of blobcache allowing larger contents to be stored
+ std::unique_ptr<MultifileBlobCache> mMultifileBlobCache;
+
// mFilename is the name of the file for storing cache contents in between
// program invocations. It is initialized to an empty string at
// construction time, and can be set with the setCacheFilename method. An
@@ -123,11 +138,7 @@
bool mMultifileMode;
// Cache limit
- int64_t mCacheByteLimit;
-
- // Whether we've kicked off a side thread that will check the multifile
- // cache size and remove entries if needed.
- bool mMultifileCleanupPending;
+ size_t mCacheByteLimit;
};
}; // namespace android
diff --git a/opengl/libs/EGL/egl_cache_multifile.cpp b/opengl/libs/EGL/egl_cache_multifile.cpp
deleted file mode 100644
index 48e557f..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-
-#include "egl_cache_multifile.h"
-
-#include <android-base/properties.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <log/log.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <utime.h>
-
-#include <algorithm>
-#include <chrono>
-#include <fstream>
-#include <limits>
-#include <locale>
-#include <map>
-#include <sstream>
-#include <unordered_map>
-
-#include <utils/JenkinsHash.h>
-
-static std::string multifileDirName = "";
-
-using namespace std::literals;
-
-namespace {
-
-// Create a directory for tracking multiple files
-void setupMultifile(const std::string& baseDir) {
- // If we've already set up the multifile dir in this base directory, we're done
- if (!multifileDirName.empty() && multifileDirName.find(baseDir) != std::string::npos) {
- return;
- }
-
- // Otherwise, create it
- multifileDirName = baseDir + ".multifile";
- if (mkdir(multifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
- ALOGW("Unable to create directory (%s), errno (%i)", multifileDirName.c_str(), errno);
- }
-}
-
-// Create a filename that is based on the hash of the key
-std::string getCacheEntryFilename(const void* key, EGLsizeiANDROID keySize,
- const std::string& baseDir) {
- // Hash the key into a string
- std::stringstream keyName;
- keyName << android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
-
- // Build a filename using dir and hash
- return baseDir + "/" + keyName.str();
-}
-
-// Determine file age based on stat modification time
-// Newer files have a higher age (time since epoch)
-time_t getFileAge(const std::string& filePath) {
- struct stat st;
- if (stat(filePath.c_str(), &st) == 0) {
- ALOGD("getFileAge returning %" PRId64 " for file age", static_cast<uint64_t>(st.st_mtime));
- return st.st_mtime;
- } else {
- ALOGW("Failed to stat %s", filePath.c_str());
- return 0;
- }
-}
-
-size_t getFileSize(const std::string& filePath) {
- struct stat st;
- if (stat(filePath.c_str(), &st) != 0) {
- ALOGE("Unable to stat %s", filePath.c_str());
- return 0;
- }
- return st.st_size;
-}
-
-// Walk through directory entries and track age and size
-// Then iterate through the entries, oldest first, and remove them until under the limit.
-// This will need to be updated if we move to a multilevel cache dir.
-bool applyLRU(size_t cacheLimit) {
- // Build a multimap of files indexed by age.
- // They will be automatically sorted smallest (oldest) to largest (newest)
- std::multimap<time_t, std::string> agesToFiles;
-
- // Map files to sizes
- std::unordered_map<std::string, size_t> filesToSizes;
-
- size_t totalCacheSize = 0;
-
- DIR* dir;
- struct dirent* entry;
- if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
- while ((entry = readdir(dir)) != nullptr) {
- if (entry->d_name == "."s || entry->d_name == ".."s) {
- continue;
- }
-
- // Look up each file age
- std::string fullPath = multifileDirName + "/" + entry->d_name;
- time_t fileAge = getFileAge(fullPath);
-
- // Track the files, sorted by age
- agesToFiles.insert(std::make_pair(fileAge, fullPath));
-
- // Also track the size so we know how much room we have freed
- size_t fileSize = getFileSize(fullPath);
- filesToSizes[fullPath] = fileSize;
- totalCacheSize += fileSize;
- }
- closedir(dir);
- } else {
- ALOGE("Unable to open filename: %s", multifileDirName.c_str());
- return false;
- }
-
- if (totalCacheSize <= cacheLimit) {
- // If LRU was called on a sufficiently small cache, no need to remove anything
- return true;
- }
-
- // Walk through the map of files until we're under the cache size
- for (const auto& cacheEntryIter : agesToFiles) {
- time_t entryAge = cacheEntryIter.first;
- const std::string entryPath = cacheEntryIter.second;
-
- ALOGD("Removing %s with age %ld", entryPath.c_str(), entryAge);
- if (std::remove(entryPath.c_str()) != 0) {
- ALOGE("Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
- return false;
- }
-
- totalCacheSize -= filesToSizes[entryPath];
- if (totalCacheSize <= cacheLimit) {
- // Success
- ALOGV("Reduced cache to %zu", totalCacheSize);
- return true;
- } else {
- ALOGD("Cache size is still too large (%zu), removing more files", totalCacheSize);
- }
- }
-
- // Should never reach this return
- return false;
-}
-
-} // namespace
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir) {
- if (baseDir.empty()) {
- return;
- }
-
- setupMultifile(baseDir);
- std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
- ALOGD("Attempting to open filename for set: %s", filename.c_str());
- std::ofstream outfile(filename, std::ofstream::binary);
- if (outfile.fail()) {
- ALOGW("Unable to open filename: %s", filename.c_str());
- return;
- }
-
- // First write the key
- outfile.write(static_cast<const char*>(key), keySize);
- if (outfile.bad()) {
- ALOGW("Unable to write key to filename: %s", filename.c_str());
- outfile.close();
- return;
- }
- ALOGD("Wrote %i bytes to out file for key", static_cast<int>(outfile.tellp()));
-
- // Then write the value
- outfile.write(static_cast<const char*>(value), valueSize);
- if (outfile.bad()) {
- ALOGW("Unable to write value to filename: %s", filename.c_str());
- outfile.close();
- return;
- }
- ALOGD("Wrote %i bytes to out file for full entry", static_cast<int>(outfile.tellp()));
-
- outfile.close();
-}
-
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir) {
- if (baseDir.empty()) {
- return 0;
- }
-
- setupMultifile(baseDir);
- std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
- // Open the hashed filename path
- ALOGD("Attempting to open filename for get: %s", filename.c_str());
- int fd = open(filename.c_str(), O_RDONLY);
-
- // File doesn't exist, this is a MISS, return zero bytes read
- if (fd == -1) {
- ALOGD("Cache MISS - failed to open filename: %s, error: %s", filename.c_str(),
- std::strerror(errno));
- return 0;
- }
-
- ALOGD("Cache HIT - opened filename: %s", filename.c_str());
-
- // Get the size of the file
- size_t entrySize = getFileSize(filename);
- if (keySize > entrySize) {
- ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
- "file",
- keySize, entrySize);
- close(fd);
- return 0;
- }
-
- // Memory map the file
- uint8_t* cacheEntry =
- reinterpret_cast<uint8_t*>(mmap(nullptr, entrySize, PROT_READ, MAP_PRIVATE, fd, 0));
- if (cacheEntry == MAP_FAILED) {
- ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
- close(fd);
- return 0;
- }
-
- // Compare the incoming key with our stored version (the beginning of the entry)
- int compare = memcmp(cacheEntry, key, keySize);
- if (compare != 0) {
- ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
- munmap(cacheEntry, entrySize);
- close(fd);
- return 0;
- }
-
- // Keys matched, so remaining cache is value size
- size_t cachedValueSize = entrySize - keySize;
-
- // Return actual value size if valueSize is not large enough
- if (cachedValueSize > valueSize) {
- ALOGD("Skipping file read, not enough room provided (valueSize): %lu, "
- "returning required space as %zu",
- valueSize, cachedValueSize);
- munmap(cacheEntry, entrySize);
- close(fd);
- return cachedValueSize;
- }
-
- // Remaining entry following the key is the value
- uint8_t* cachedValue = cacheEntry + keySize;
- memcpy(value, cachedValue, cachedValueSize);
- munmap(cacheEntry, entrySize);
- close(fd);
-
- ALOGD("Read %zu bytes from %s", cachedValueSize, filename.c_str());
- return cachedValueSize;
-}
-
-// Walk through the files in our flat directory, checking the size of each one.
-// Return the total size of normal files in the directory.
-// This will need to be updated if we move to a multilevel cache dir.
-size_t getMultifileCacheSize() {
- if (multifileDirName.empty()) {
- return 0;
- }
-
- DIR* dir;
- struct dirent* entry;
- size_t size = 0;
-
- ALOGD("Using %s as the multifile cache dir ", multifileDirName.c_str());
-
- if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
- while ((entry = readdir(dir)) != nullptr) {
- if (entry->d_name == "."s || entry->d_name == ".."s) {
- continue;
- }
-
- // Add up the size of all files in the dir
- std::string fullPath = multifileDirName + "/" + entry->d_name;
- size += getFileSize(fullPath);
- }
- closedir(dir);
- } else {
- ALOGW("Unable to open filename: %s", multifileDirName.c_str());
- return 0;
- }
-
- return size;
-}
-
-// When removing files, what fraction of the overall limit should be reached when removing files
-// A divisor of two will decrease the cache to 50%, four to 25% and so on
-constexpr uint32_t kCacheLimitDivisor = 2;
-
-// Calculate the cache size and remove old entries until under the limit
-void checkMultifileCacheSize(size_t cacheByteLimit) {
- // Start with the value provided by egl_cache
- size_t limit = cacheByteLimit;
-
- // Check for a debug value
- int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.bytelimit", -1);
- if (debugCacheSize >= 0) {
- ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.bytelimit", limit,
- debugCacheSize);
- limit = debugCacheSize;
- }
-
- // Tally up the initial amount of cache in use
- size_t size = getMultifileCacheSize();
- ALOGD("Multifile cache dir size: %zu", size);
-
- // If size is larger than the threshold, remove files using LRU
- if (size > limit) {
- ALOGV("Multifile cache size is larger than %zu, removing old entries", cacheByteLimit);
- if (!applyLRU(limit / kCacheLimitDivisor)) {
- ALOGE("Error when clearing multifile shader cache");
- return;
- }
- }
- ALOGD("Multifile cache size after reduction: %zu", getMultifileCacheSize());
-}
-
-}; // namespace android
\ No newline at end of file
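The removed applyLRU() above builds a std::multimap keyed by file mtime so iteration naturally runs oldest-to-newest, then deletes entries until the directory is back under the byte limit; the MultifileBlobCache that replaces it keeps the same policy. Below is a minimal standalone sketch of that eviction ordering, with hypothetical paths and sizes rather than real cache files.

#include <cstddef>
#include <ctime>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct CacheFile {
    std::string path;
    size_t size;
};

// Plan which files to drop, oldest mtime first, until the total fits the limit.
// A real cache would unlink each path instead of collecting it.
std::vector<std::string> planLruEviction(const std::multimap<time_t, CacheFile>& byAge,
                                         size_t totalSize, size_t limit) {
    std::vector<std::string> toRemove;
    for (const auto& agedFile : byAge) {
        if (totalSize <= limit) break;
        toRemove.push_back(agedFile.second.path);
        totalSize -= agedFile.second.size;
    }
    return toRemove;
}

int main() {
    std::multimap<time_t, CacheFile> byAge;
    byAge.insert({100, {"entry_a", 400}});  // oldest entry
    byAge.insert({200, {"entry_b", 300}});
    byAge.insert({300, {"entry_c", 300}});  // newest entry
    for (const auto& path : planLruEviction(byAge, /*totalSize=*/1000, /*limit=*/500)) {
        std::cout << "evict " << path << '\n';  // prints entry_a, then entry_b
    }
    return 0;
}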
diff --git a/opengl/libs/EGL/egl_cache_multifile.h b/opengl/libs/EGL/egl_cache_multifile.h
deleted file mode 100644
index ee5fe81..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_EGL_CACHE_MULTIFILE_H
-#define ANDROID_EGL_CACHE_MULTIFILE_H
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-
-#include <string>
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir);
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
- EGLsizeiANDROID valueSize, const std::string& baseDir);
-size_t getMultifileCacheSize();
-void checkMultifileCacheSize(size_t cacheByteLimit);
-
-}; // namespace android
-
-#endif // ANDROID_EGL_CACHE_MULTIFILE_H
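The deleted API hashed the key into a filename and stored each entry as the raw key bytes immediately followed by the value bytes; a lookup re-reads the stored key prefix and rejects the entry on mismatch, which is how filename hash collisions were caught. A rough standalone sketch of that on-disk layout follows, using hypothetical helper names and iostreams in place of open/mmap.

#include <cstdint>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

// Write one entry as [key bytes][value bytes] in a single file.
bool writeEntry(const std::string& path, const std::vector<uint8_t>& key,
                const std::vector<uint8_t>& value) {
    std::ofstream out(path, std::ofstream::binary);
    out.write(reinterpret_cast<const char*>(key.data()),
              static_cast<std::streamsize>(key.size()));
    out.write(reinterpret_cast<const char*>(value.data()),
              static_cast<std::streamsize>(value.size()));
    return out.good();
}

// Read an entry back. The value is returned only if the stored key prefix matches,
// filtering out filename hash collisions and truncated files.
bool readEntry(const std::string& path, const std::vector<uint8_t>& key,
               std::vector<uint8_t>* value) {
    std::ifstream in(path, std::ifstream::binary);
    if (!in) return false;  // cache miss
    std::vector<uint8_t> storedKey(key.size());
    in.read(reinterpret_cast<char*>(storedKey.data()),
            static_cast<std::streamsize>(storedKey.size()));
    if (!in || storedKey != key) return false;  // collision or modified file
    value->assign(std::istreambuf_iterator<char>(in), std::istreambuf_iterator<char>());
    return true;
}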
diff --git a/opengl/tests/EGLTest/egl_cache_test.cpp b/opengl/tests/EGLTest/egl_cache_test.cpp
index 265bec4..32e408c 100644
--- a/opengl/tests/EGLTest/egl_cache_test.cpp
+++ b/opengl/tests/EGLTest/egl_cache_test.cpp
@@ -24,7 +24,7 @@
#include <android-base/test_utils.h>
#include "egl_cache.h"
-#include "egl_cache_multifile.h"
+#include "MultifileBlobCache.h"
#include "egl_display.h"
#include <memory>
@@ -33,12 +33,16 @@
namespace android {
-class EGLCacheTest : public ::testing::Test {
+class EGLCacheTest : public ::testing::TestWithParam<egl_cache_t::EGLCacheMode> {
protected:
virtual void SetUp() {
- mCache = egl_cache_t::get();
+ // Terminate to clean up any previous cache in this process
+ mCache->terminate();
+
mTempFile.reset(new TemporaryFile());
mCache->setCacheFilename(&mTempFile->path[0]);
+ mCache->setCacheLimit(1024);
+ mCache->setCacheMode(mCacheMode);
}
virtual void TearDown() {
@@ -49,11 +53,12 @@
std::string getCachefileName();
- egl_cache_t* mCache;
+ egl_cache_t* mCache = egl_cache_t::get();
std::unique_ptr<TemporaryFile> mTempFile;
+ egl_cache_t::EGLCacheMode mCacheMode = GetParam();
};
-TEST_F(EGLCacheTest, UninitializedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, UninitializedCacheAlwaysMisses) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->setBlob("abcd", 4, "efgh", 4);
ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf, 4));
@@ -63,7 +68,7 @@
ASSERT_EQ(0xee, buf[3]);
}
-TEST_F(EGLCacheTest, InitializedCacheAlwaysHits) {
+TEST_P(EGLCacheTest, InitializedCacheAlwaysHits) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -74,7 +79,7 @@
ASSERT_EQ('h', buf[3]);
}
-TEST_F(EGLCacheTest, TerminatedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, TerminatedCacheAlwaysMisses) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -86,7 +91,7 @@
ASSERT_EQ(0xee, buf[3]);
}
-TEST_F(EGLCacheTest, ReinitializedCacheContainsValues) {
+TEST_P(EGLCacheTest, ReinitializedCacheContainsValues) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
mCache->setBlob("abcd", 4, "efgh", 4);
@@ -101,12 +106,12 @@
std::string EGLCacheTest::getCachefileName() {
// Return the monolithic filename unless we find the multifile dir
- std::string cachefileName = &mTempFile->path[0];
- std::string multifileDirName = cachefileName + ".multifile";
+ std::string cachePath = &mTempFile->path[0];
+ std::string multifileDirName = cachePath + ".multifile";
+ std::string cachefileName = "";
struct stat info;
if (stat(multifileDirName.c_str(), &info) == 0) {
-
// Ensure we only have one file to manage
int realFileCount = 0;
@@ -121,6 +126,8 @@
cachefileName = multifileDirName + "/" + entry->d_name;
realFileCount++;
}
+ } else {
+ printf("Unable to open %s, error: %s\n", multifileDirName.c_str(), std::strerror(errno));
}
if (realFileCount != 1) {
@@ -128,14 +135,18 @@
// violates test assumptions
cachefileName = "";
}
+ } else {
+ printf("Unable to stat %s, error: %s\n", multifileDirName.c_str(), std::strerror(errno));
}
return cachefileName;
}
-TEST_F(EGLCacheTest, ModifiedCacheMisses) {
- // Turn this back on if multifile becomes the default
- GTEST_SKIP() << "Skipping test designed for multifile, see b/263574392 and b/246966894";
+TEST_P(EGLCacheTest, ModifiedCacheMisses) {
+ // Skip if not in multifile mode
+ if (mCacheMode == egl_cache_t::EGLCacheMode::Monolithic) {
+ GTEST_SKIP() << "Skipping test designed for multifile";
+ }
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -147,13 +158,13 @@
ASSERT_EQ('g', buf[2]);
ASSERT_EQ('h', buf[3]);
+ // Ensure the cache file is written to disk
+ mCache->terminate();
+
// Depending on the cache mode, the file will be in different locations
std::string cachefileName = getCachefileName();
ASSERT_TRUE(cachefileName.length() > 0);
- // Ensure the cache file is written to disk
- mCache->terminate();
-
// Stomp on the beginning of the cache file, breaking the key match
const long stomp = 0xbadf00d;
FILE *file = fopen(cachefileName.c_str(), "w");
@@ -164,14 +175,15 @@
// Ensure no cache hit
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
uint8_t buf2[4] = { 0xee, 0xee, 0xee, 0xee };
- ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf2, 4));
+ // getBlob may return junk for required size, but should not return a cache hit
+ mCache->getBlob("abcd", 4, buf2, 4);
ASSERT_EQ(0xee, buf2[0]);
ASSERT_EQ(0xee, buf2[1]);
ASSERT_EQ(0xee, buf2[2]);
ASSERT_EQ(0xee, buf2[3]);
}
-TEST_F(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
+TEST_P(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -204,4 +216,6 @@
ASSERT_LE(mCache->getCacheSize(), 4);
}
+INSTANTIATE_TEST_CASE_P(MonolithicCacheTests, EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Monolithic));
+INSTANTIATE_TEST_CASE_P(MultifileCacheTests, EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Multifile));
}
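The test changes above convert the fixture from TEST_F to TEST_P so every case runs once per cache mode, instantiated separately for Monolithic and Multifile. For reference, here is a minimal self-contained version of that GoogleTest pattern with a hypothetical enum and fixture, not the egl_cache_t types.

#include <gtest/gtest.h>

enum class CacheMode { Monolithic, Multifile };

class CacheModeTest : public ::testing::TestWithParam<CacheMode> {
protected:
    CacheMode mMode = GetParam();  // chosen by the INSTANTIATE_* lines below
};

TEST_P(CacheModeTest, RunsOncePerMode) {
    if (GetParam() == CacheMode::Monolithic) {
        GTEST_SKIP() << "Example of skipping a mode-specific test";
    }
    EXPECT_EQ(mMode, CacheMode::Multifile);
}

INSTANTIATE_TEST_CASE_P(Monolithic, CacheModeTest, ::testing::Values(CacheMode::Monolithic));
INSTANTIATE_TEST_CASE_P(Multifile, CacheModeTest, ::testing::Values(CacheMode::Multifile));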
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
index 6490476..3ed24b2 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
@@ -391,7 +391,9 @@
void LayerSnapshotBuilder::updateSnapshots(const Args& args) {
ATRACE_NAME("UpdateSnapshots");
- if (args.forceUpdate || args.displayChanges) {
+ if (args.parentCrop) {
+ mRootSnapshot.geomLayerBounds = *args.parentCrop;
+ } else if (args.forceUpdate || args.displayChanges) {
mRootSnapshot.geomLayerBounds = getMaxDisplayBounds(args.displays);
}
if (args.displayChanges) {
@@ -618,7 +620,8 @@
RequestedLayerState::Changes::AffectsChildren);
snapshot.changes = parentChanges | requested.changes;
snapshot.isHiddenByPolicyFromParent = parentSnapshot.isHiddenByPolicyFromParent ||
- parentSnapshot.invalidTransform || requested.isHiddenByPolicy();
+ parentSnapshot.invalidTransform || requested.isHiddenByPolicy() ||
+ (args.excludeLayerIds.find(path.id) != args.excludeLayerIds.end());
snapshot.contentDirty = requested.what & layer_state_t::CONTENT_DIRTY;
// TODO(b/238781169) scope down the changes to only buffer updates.
snapshot.hasReadyFrame =
@@ -983,6 +986,20 @@
}
}
+// Visit each visible snapshot in z-order
+void LayerSnapshotBuilder::forEachVisibleSnapshot(const ConstVisitor& visitor,
+ const LayerHierarchy& root) const {
+ root.traverseInZOrder(
+ [this, visitor](const LayerHierarchy&,
+ const LayerHierarchy::TraversalPath& traversalPath) -> bool {
+ LayerSnapshot* snapshot = getSnapshot(traversalPath);
+ if (snapshot && snapshot->isVisible) {
+ visitor(*snapshot);
+ }
+ return true;
+ });
+}
+
void LayerSnapshotBuilder::forEachVisibleSnapshot(const Visitor& visitor) {
for (int i = 0; i < mNumInterestingSnapshots; i++) {
std::unique_ptr<LayerSnapshot>& snapshot = mSnapshots.at((size_t)i);
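The new forEachVisibleSnapshot overload walks an arbitrary hierarchy root in z-order and hands only visible snapshots to a const visitor. As a rough illustration of that traverse-and-filter shape, here is a toy node type (not the SurfaceFlinger classes), with children assumed already sorted back-to-front.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Node {
    std::string name;
    bool visible = true;
    std::vector<Node> children;  // already ordered back-to-front (z-order)
};

using ConstVisitor = std::function<void(const Node&)>;

// Depth-first, z-ordered traversal; invisible nodes are skipped by the visitor
// but traversal still descends into their children.
void forEachVisible(const Node& root, const ConstVisitor& visitor) {
    if (root.visible) visitor(root);
    for (const Node& child : root.children) {
        forEachVisible(child, visitor);
    }
}

int main() {
    Node root{"root"};
    root.children.push_back(Node{"wallpaper"});
    Node overlay{"overlay"};
    overlay.visible = false;
    root.children.push_back(overlay);
    root.children.push_back(Node{"status-bar"});
    forEachVisible(root, [](const Node& n) { std::cout << n.name << '\n'; });
    return 0;
}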
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
index abb7e66..f4544fd 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
@@ -36,7 +36,7 @@
class LayerSnapshotBuilder {
public:
struct Args {
- const LayerHierarchy& root;
+ LayerHierarchy root;
const LayerLifecycleManager& layerLifecycleManager;
bool forceUpdate = false;
bool includeMetadata = false;
@@ -46,6 +46,8 @@
const renderengine::ShadowSettings& globalShadowSettings;
bool supportsBlur = true;
bool forceFullDamage = false;
+ std::optional<FloatRect> parentCrop = std::nullopt;
+ std::unordered_set<uint32_t> excludeLayerIds;
};
LayerSnapshotBuilder();
@@ -65,6 +67,9 @@
// Visit each visible snapshot in z-order
void forEachVisibleSnapshot(const ConstVisitor& visitor) const;
+ // Visit each visible snapshot in z-order
+ void forEachVisibleSnapshot(const ConstVisitor& visitor, const LayerHierarchy& root) const;
+
typedef std::function<void(std::unique_ptr<LayerSnapshot>& snapshot)> Visitor;
// Visit each visible snapshot in z-order and move the snapshot if needed
void forEachVisibleSnapshot(const Visitor& visitor);
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index b6b9965..31ee91e 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -146,7 +146,7 @@
mLayerCreationFlags(args.flags),
mBorderEnabled(false),
mTextureName(args.textureName),
- mLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
+ mLegacyLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
ALOGV("Creating Layer %s", getDebugName());
uint32_t layerFlags = 0;
@@ -3098,15 +3098,14 @@
return true;
}
-bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles) {
+bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles,
+ bool willPresent) {
// If there is no handle, we will not send a callback so reset mReleasePreviousBuffer and return
if (handles.empty()) {
mReleasePreviousBuffer = false;
return false;
}
- const bool willPresent = willPresentCurrentTransaction();
-
for (const auto& handle : handles) {
// If this transaction set a buffer on this layer, release its previous buffer
handle->releasePreviousBuffer = mReleasePreviousBuffer;
@@ -3180,11 +3179,10 @@
return fenceSignaled;
}
-bool Layer::onPreComposition(nsecs_t refreshStartTime) {
+void Layer::onPreComposition(nsecs_t refreshStartTime) {
for (const auto& handle : mDrawingState.callbackHandles) {
handle->refreshStartTime = refreshStartTime;
}
- return hasReadyFrame();
}
void Layer::setAutoRefresh(bool autoRefresh) {
@@ -3570,7 +3568,7 @@
sp<LayerFE> Layer::getCompositionEngineLayerFE() const {
// There's no need to get a CE Layer if the layer isn't going to draw anything.
- return hasSomethingToDraw() ? mLayerFE : nullptr;
+ return hasSomethingToDraw() ? mLegacyLayerFE : nullptr;
}
const LayerSnapshot* Layer::getLayerSnapshot() const {
@@ -3581,16 +3579,36 @@
return mSnapshot.get();
}
+std::unique_ptr<frontend::LayerSnapshot> Layer::stealLayerSnapshot() {
+ return std::move(mSnapshot);
+}
+
+void Layer::updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot) {
+ mSnapshot = std::move(snapshot);
+}
+
const compositionengine::LayerFECompositionState* Layer::getCompositionState() const {
return mSnapshot.get();
}
sp<LayerFE> Layer::copyCompositionEngineLayerFE() const {
- auto result = mFlinger->getFactory().createLayerFE(mLayerFE->getDebugName());
+ auto result = mFlinger->getFactory().createLayerFE(mName);
result->mSnapshot = std::make_unique<LayerSnapshot>(*mSnapshot);
return result;
}
+sp<LayerFE> Layer::getCompositionEngineLayerFE(
+ const frontend::LayerHierarchy::TraversalPath& path) {
+ for (auto& [p, layerFE] : mLayerFEs) {
+ if (p == path) {
+ return layerFE;
+ }
+ }
+ auto layerFE = mFlinger->getFactory().createLayerFE(mName);
+ mLayerFEs.emplace_back(path, layerFE);
+ return layerFE;
+}
+
void Layer::useSurfaceDamage() {
if (mFlinger->mForceFullDamage) {
surfaceDamageRegion = Region::INVALID_REGION;
@@ -3986,28 +4004,6 @@
}
}
-LayerSnapshotGuard::LayerSnapshotGuard(Layer* layer) : mLayer(layer) {
- if (mLayer) {
- mLayer->mLayerFE->mSnapshot = std::move(mLayer->mSnapshot);
- }
-}
-
-LayerSnapshotGuard::~LayerSnapshotGuard() {
- if (mLayer) {
- mLayer->mSnapshot = std::move(mLayer->mLayerFE->mSnapshot);
- }
-}
-
-LayerSnapshotGuard::LayerSnapshotGuard(LayerSnapshotGuard&& other) : mLayer(other.mLayer) {
- other.mLayer = nullptr;
-}
-
-LayerSnapshotGuard& LayerSnapshotGuard::operator=(LayerSnapshotGuard&& other) {
- mLayer = other.mLayer;
- other.mLayer = nullptr;
- return *this;
-}
-
void Layer::setTrustedPresentationInfo(TrustedPresentationThresholds const& thresholds,
TrustedPresentationListener const& listener) {
bool hadTrustedPresentationListener = hasTrustedPresentationListener();
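Layer now keeps one LayerFE per hierarchy traversal path, so a layer mirrored into several hierarchies gets separate front-end state, and an entry is created lazily on first request. A generic find-or-create sketch over a small vector is shown below, using a string in place of TraversalPath; these are toy types, not the real classes. A flat vector is a reasonable choice here because a layer typically appears in only a handful of hierarchies, so a linear scan beats the overhead of a map.

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct FrontEndState {
    std::string debugName;
};

class LayerLike {
public:
    // Return the per-path state, creating it on first request.
    std::shared_ptr<FrontEndState> stateForPath(const std::string& path) {
        for (auto& [p, state] : mStates) {
            if (p == path) return state;
        }
        auto state = std::make_shared<FrontEndState>(FrontEndState{path});
        mStates.emplace_back(path, state);
        return state;
    }

private:
    std::vector<std::pair<std::string, std::shared_ptr<FrontEndState>>> mStates;
};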
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index f858224..3d4f03f 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -307,7 +307,8 @@
bool setSurfaceDamageRegion(const Region& /*surfaceDamage*/);
bool setApi(int32_t /*api*/);
bool setSidebandStream(const sp<NativeHandle>& /*sidebandStream*/);
- bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/);
+ bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/,
+ bool willPresent);
virtual bool setBackgroundColor(const half3& color, float alpha, ui::Dataspace dataspace);
virtual bool setColorSpaceAgnostic(const bool agnostic);
virtual bool setDimmingEnabled(const bool dimmingEnabled);
@@ -328,9 +329,12 @@
virtual sp<LayerFE> getCompositionEngineLayerFE() const;
virtual sp<LayerFE> copyCompositionEngineLayerFE() const;
+ sp<LayerFE> getCompositionEngineLayerFE(const frontend::LayerHierarchy::TraversalPath&);
const frontend::LayerSnapshot* getLayerSnapshot() const;
frontend::LayerSnapshot* editLayerSnapshot();
+ std::unique_ptr<frontend::LayerSnapshot> stealLayerSnapshot();
+ void updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot);
// If we have received a new buffer this frame, we will pass its surface
// damage down to hardware composer. Otherwise, we must send a region with
@@ -512,7 +516,7 @@
// implements compositionengine::LayerFE
const compositionengine::LayerFECompositionState* getCompositionState() const;
bool fenceHasSignaled() const;
- bool onPreComposition(nsecs_t refreshStartTime);
+ void onPreComposition(nsecs_t refreshStartTime);
void onLayerDisplayed(ftl::SharedFuture<FenceResult>);
void setWasClientComposed(const sp<Fence>& fence) {
@@ -832,6 +836,7 @@
void updateMetadataSnapshot(const LayerMetadata& parentMetadata);
void updateRelativeMetadataSnapshot(const LayerMetadata& relativeLayerMetadata,
std::unordered_set<Layer*>& visited);
+ bool willPresentCurrentTransaction() const;
protected:
// For unit tests
@@ -1037,8 +1042,6 @@
// Crop that applies to the buffer
Rect computeBufferCrop(const State& s);
- bool willPresentCurrentTransaction() const;
-
void callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
const sp<GraphicBuffer>& buffer, uint64_t framenumber,
const sp<Fence>& releaseFence,
@@ -1146,34 +1149,10 @@
// not specify a destination frame.
ui::Transform mRequestedTransform;
- sp<LayerFE> mLayerFE;
+ sp<LayerFE> mLegacyLayerFE;
+ std::vector<std::pair<frontend::LayerHierarchy::TraversalPath, sp<LayerFE>>> mLayerFEs;
std::unique_ptr<frontend::LayerSnapshot> mSnapshot =
std::make_unique<frontend::LayerSnapshot>();
-
- friend class LayerSnapshotGuard;
-};
-
-// LayerSnapshotGuard manages the movement of LayerSnapshot between a Layer and its corresponding
-// LayerFE. This class must be used whenever LayerFEs are passed to CompositionEngine. Instances of
-// LayerSnapshotGuard should only be constructed on the main thread and should not be moved outside
-// the main thread.
-//
-// Moving the snapshot instead of sharing common state prevents use of LayerFE outside the main
-// thread by making errors obvious (i.e. use outside the main thread results in SEGFAULTs due to
-// nullptr dereference).
-class LayerSnapshotGuard {
-public:
- LayerSnapshotGuard(Layer* layer) REQUIRES(kMainThreadContext);
- ~LayerSnapshotGuard() REQUIRES(kMainThreadContext);
-
- LayerSnapshotGuard(const LayerSnapshotGuard&) = delete;
- LayerSnapshotGuard& operator=(const LayerSnapshotGuard&) = delete;
-
- LayerSnapshotGuard(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
- LayerSnapshotGuard& operator=(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
-
-private:
- Layer* mLayer;
};
std::ostream& operator<<(std::ostream& stream, const Layer::FrameRate& rate);
diff --git a/services/surfaceflinger/LayerRenderArea.cpp b/services/surfaceflinger/LayerRenderArea.cpp
index 2b4375b..03a7f22 100644
--- a/services/surfaceflinger/LayerRenderArea.cpp
+++ b/services/surfaceflinger/LayerRenderArea.cpp
@@ -69,6 +69,14 @@
void LayerRenderArea::render(std::function<void()> drawLayers) {
using namespace std::string_literals;
+ if (!mChildrenOnly) {
+ mTransform = mLayer->getTransform().inverse();
+ }
+
+ if (mFlinger.mLayerLifecycleManagerEnabled) {
+ drawLayers();
+ return;
+ }
// If layer is offscreen, update mirroring info if it exists
if (mLayer->isRemovedFromCurrentState()) {
mLayer->traverse(LayerVector::StateSet::Drawing,
@@ -78,7 +86,6 @@
}
if (!mChildrenOnly) {
- mTransform = mLayer->getTransform().inverse();
// If the layer is offscreen, compute bounds since we don't compute bounds for offscreen
// layers in a regular cycles.
// layers in regular cycles.
if (mLayer->isRemovedFromCurrentState()) {
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 76e9416..5e79a5c 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -532,6 +532,12 @@
const sp<EventThreadConnection>& connection) const {
const auto throttleVsync = [&] {
const auto& vsyncData = event.vsync.vsyncData;
+ if (connection->frameRate.isValid()) {
+ return !mVsyncSchedule->getTracker()
+ .isVSyncInPhase(vsyncData.preferredExpectedPresentationTime(),
+ connection->frameRate);
+ }
+
const auto expectedPresentTime =
TimePoint::fromNs(vsyncData.preferredExpectedPresentationTime());
return !mEventThreadCallback.isVsyncTargetForUid(expectedPresentTime,
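The new branch above asks the vsync tracker whether the upcoming timestamp is in phase with the connection's requested frame rate, the idea being that, say, a 30 Hz choreographer on a 120 Hz display only receives every fourth callback. The sketch below models that with plain integer math and a hypothetical helper; the real isVSyncInPhase presumably also accounts for phase offsets and timing drift.

#include <cstdint>
#include <iostream>

// Decide whether the vsync at `timestampNs` should be dispatched to a client that
// asked for `frameRateHz`. Dividing the display rate by the requested rate gives
// the divisor; only every Nth vsync counts as "in phase".
bool isVsyncInPhase(int64_t timestampNs, int64_t vsyncPeriodNs, int32_t displayRateHz,
                    int32_t frameRateHz) {
    const int32_t divisor = displayRateHz / frameRateHz;  // e.g. 120 / 30 == 4
    const int64_t vsyncIndex = timestampNs / vsyncPeriodNs;
    return divisor <= 1 || (vsyncIndex % divisor) == 0;
}

int main() {
    const int64_t periodNs = 8'333'333;  // ~120 Hz
    for (int64_t i = 0; i < 8; i++) {
        const bool dispatch = isVsyncInPhase(i * periodNs, periodNs, 120, 30);
        std::cout << "vsync " << i << (dispatch ? ": dispatch" : ": throttle") << '\n';
    }
    return 0;
}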
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index b86553b..aa27091 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -97,6 +97,9 @@
const uid_t mOwnerUid;
const EventRegistrationFlags mEventRegistration;
+ /** The frame rate set to the attached choreographer. */
+ Fps frameRate;
+
private:
virtual void onFirstRef();
EventThread* const mEventThread;
diff --git a/services/surfaceflinger/Scheduler/LayerHistory.cpp b/services/surfaceflinger/Scheduler/LayerHistory.cpp
index 55fa402..e853833 100644
--- a/services/surfaceflinger/Scheduler/LayerHistory.cpp
+++ b/services/surfaceflinger/Scheduler/LayerHistory.cpp
@@ -32,6 +32,7 @@
#include <utility>
#include "../Layer.h"
+#include "EventThread.h"
#include "LayerInfo.h"
namespace android::scheduler {
@@ -140,6 +141,22 @@
info->setLastPresentTime(presentTime, now, updateType, mModeChangePending, layerProps);
+ // Set the frame rate on the attached choreographer.
+ // TODO(b/260898223): Change to use layer hierarchy and handle frame rate vote.
+ if (updateType == LayerUpdateType::SetFrameRate) {
+ auto range = mAttachedChoreographers.equal_range(id);
+ auto it = range.first;
+ while (it != range.second) {
+ sp<EventThreadConnection> choreographerConnection = it->second.promote();
+ if (choreographerConnection) {
+ choreographerConnection->frameRate = layer->getFrameRateForLayerTree().rate;
+ it++;
+ } else {
+ it = mAttachedChoreographers.erase(it);
+ }
+ }
+ }
+
// Activate layer if inactive.
if (found == LayerStatus::LayerInInactiveMap) {
mActiveLayerInfos.insert(
@@ -294,6 +311,12 @@
return 0.f;
}
+void LayerHistory::attachChoreographer(int32_t layerId,
+ const sp<EventThreadConnection>& choreographerConnection) {
+ std::lock_guard lock(mLock);
+ mAttachedChoreographers.insert({layerId, wp<EventThreadConnection>(choreographerConnection)});
+}
+
auto LayerHistory::findLayer(int32_t id) -> std::pair<LayerStatus, LayerPair*> {
// the layer could be in either the active or inactive map, try both
auto it = mActiveLayerInfos.find(id);
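The update above walks the multimap of choreographer connections attached to a layer, promotes each weak reference, and erases entries whose connection has already been destroyed, so the map cleans itself up lazily. A standalone sketch of that promote-or-erase loop with std::weak_ptr and a toy connection type:

#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

struct Connection {
    double frameRate = 0;
};

using ConnectionMap = std::unordered_multimap<int32_t, std::weak_ptr<Connection>>;

// Push a new frame rate to every live connection attached to `layerId`,
// erasing entries whose connection has gone away.
void setFrameRateForLayer(ConnectionMap& map, int32_t layerId, double fps) {
    auto range = map.equal_range(layerId);
    for (auto it = range.first; it != range.second;) {
        if (auto conn = it->second.lock()) {
            conn->frameRate = fps;
            ++it;
        } else {
            it = map.erase(it);  // lazily drop stale weak references
        }
    }
}

int main() {
    ConnectionMap map;
    auto live = std::make_shared<Connection>();
    map.emplace(7, live);
    map.emplace(7, std::make_shared<Connection>());  // expires immediately
    setFrameRateForLayer(map, 7, 30.0);
    std::cout << "live entries: " << map.count(7) << ", rate: " << live->frameRate << '\n';
    return 0;
}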
diff --git a/services/surfaceflinger/Scheduler/LayerHistory.h b/services/surfaceflinger/Scheduler/LayerHistory.h
index 5022906..68e7030 100644
--- a/services/surfaceflinger/Scheduler/LayerHistory.h
+++ b/services/surfaceflinger/Scheduler/LayerHistory.h
@@ -27,6 +27,8 @@
#include <utility>
#include <vector>
+#include "EventThread.h"
+
#include "RefreshRateSelector.h"
namespace android {
@@ -80,6 +82,9 @@
// return the frames per second of the layer with the given sequence id.
float getLayerFramerate(nsecs_t now, int32_t id) const;
+ void attachChoreographer(int32_t layerId,
+ const sp<EventThreadConnection>& choreographerConnection);
+
private:
friend class LayerHistoryTest;
friend class TestableScheduler;
@@ -117,6 +122,10 @@
LayerInfos mActiveLayerInfos GUARDED_BY(mLock);
LayerInfos mInactiveLayerInfos GUARDED_BY(mLock);
+ // Map keyed by layer ID (sequence) to choreographer connections.
+ std::unordered_multimap<int32_t, wp<EventThreadConnection>> mAttachedChoreographers
+ GUARDED_BY(mLock);
+
uint32_t mDisplayArea = 0;
// Whether to emit systrace output and debug logs.
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index e6f4665..eed57ef 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -47,6 +47,7 @@
#include "Display/DisplayMap.h"
#include "EventThread.h"
#include "FrameRateOverrideMappings.h"
+#include "FrontEnd/LayerHandle.h"
#include "OneShotTimer.h"
#include "SurfaceFlingerProperties.h"
#include "VSyncPredictor.h"
@@ -232,15 +233,21 @@
}
sp<EventThreadConnection> Scheduler::createConnectionInternal(
- EventThread* eventThread, EventRegistrationFlags eventRegistration) {
- return eventThread->createEventConnection([&] { resync(); }, eventRegistration);
+ EventThread* eventThread, EventRegistrationFlags eventRegistration,
+ const sp<IBinder>& layerHandle) {
+ int32_t layerId = static_cast<int32_t>(LayerHandle::getLayerId(layerHandle));
+ auto connection = eventThread->createEventConnection([&] { resync(); }, eventRegistration);
+ mLayerHistory.attachChoreographer(layerId, connection);
+ return connection;
}
sp<IDisplayEventConnection> Scheduler::createDisplayEventConnection(
- ConnectionHandle handle, EventRegistrationFlags eventRegistration) {
+ ConnectionHandle handle, EventRegistrationFlags eventRegistration,
+ const sp<IBinder>& layerHandle) {
std::lock_guard<std::mutex> lock(mConnectionsLock);
RETURN_IF_INVALID_HANDLE(handle, nullptr);
- return createConnectionInternal(mConnections[handle].thread.get(), eventRegistration);
+ return createConnectionInternal(mConnections[handle].thread.get(), eventRegistration,
+ layerHandle);
}
sp<EventThreadConnection> Scheduler::getEventConnection(ConnectionHandle handle) {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 8dc2def..8c8fc21 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -147,7 +147,8 @@
std::chrono::nanoseconds readyDuration);
sp<IDisplayEventConnection> createDisplayEventConnection(
- ConnectionHandle, EventRegistrationFlags eventRegistration = {});
+ ConnectionHandle, EventRegistrationFlags eventRegistration = {},
+ const sp<IBinder>& layerHandle = nullptr);
sp<EventThreadConnection> getEventConnection(ConnectionHandle);
@@ -302,7 +303,8 @@
// Create a connection on the given EventThread.
ConnectionHandle createConnection(std::unique_ptr<EventThread>);
sp<EventThreadConnection> createConnectionInternal(
- EventThread*, EventRegistrationFlags eventRegistration = {});
+ EventThread*, EventRegistrationFlags eventRegistration = {},
+ const sp<IBinder>& layerHandle = nullptr);
// Update feature state machine to given state when corresponding timer resets or expires.
void kernelIdleTimerCallback(TimerState) EXCLUDES(mDisplayLock);
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 20787c4..a0c3eb0 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -173,6 +173,8 @@
using namespace hardware::configstore;
using namespace hardware::configstore::V1_0;
using namespace sysprop;
+using ftl::Flags;
+using namespace ftl::flag_operators;
using aidl::android::hardware::graphics::common::DisplayDecorationSupport;
using aidl::android::hardware::graphics::composer3::Capability;
@@ -470,6 +472,10 @@
mPowerHintSessionMode =
{.late = base::GetBoolProperty("debug.sf.send_late_power_session_hint"s, true),
.early = base::GetBoolProperty("debug.sf.send_early_power_session_hint"s, false)};
+ mLayerLifecycleManagerEnabled =
+ base::GetBoolProperty("debug.sf.enable_layer_lifecycle_manager"s, false);
+ mLegacyFrontEndEnabled = !mLayerLifecycleManagerEnabled ||
+ base::GetBoolProperty("debug.sf.enable_legacy_frontend"s, true);
}
LatchUnsignaledConfig SurfaceFlinger::getLatchUnsignaledConfig() {
@@ -1988,13 +1994,14 @@
// ----------------------------------------------------------------------------
sp<IDisplayEventConnection> SurfaceFlinger::createDisplayEventConnection(
- gui::ISurfaceComposer::VsyncSource vsyncSource, EventRegistrationFlags eventRegistration) {
+ gui::ISurfaceComposer::VsyncSource vsyncSource, EventRegistrationFlags eventRegistration,
+ const sp<IBinder>& layerHandle) {
const auto& handle =
vsyncSource == gui::ISurfaceComposer::VsyncSource::eVsyncSourceSurfaceFlinger
? mSfConnectionHandle
: mAppConnectionHandle;
- return mScheduler->createDisplayEventConnection(handle, eventRegistration);
+ return mScheduler->createDisplayEventConnection(handle, eventRegistration, layerHandle);
}
void SurfaceFlinger::scheduleCommit(FrameHint hint) {
@@ -2136,6 +2143,110 @@
}
}
+bool SurfaceFlinger::updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
+ bool transactionsFlushed,
+ bool& outTransactionsAreEmpty) {
+ bool needsTraversal = false;
+ if (transactionsFlushed) {
+ needsTraversal |= commitMirrorDisplays(vsyncId);
+ needsTraversal |= commitCreatedLayers(vsyncId, update.layerCreatedStates);
+ needsTraversal |= applyTransactions(update.transactions, vsyncId);
+ }
+ outTransactionsAreEmpty = !needsTraversal;
+ const bool shouldCommit = (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
+ if (shouldCommit) {
+ commitTransactions();
+ }
+
+ bool mustComposite = latchBuffers() || shouldCommit;
+ updateLayerGeometry();
+ return mustComposite;
+}
+
+bool SurfaceFlinger::updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update,
+ bool transactionsFlushed, bool& outTransactionsAreEmpty) {
+ using Changes = frontend::RequestedLayerState::Changes;
+ ATRACE_NAME("updateLayerSnapshots");
+ {
+ mLayerLifecycleManager.addLayers(std::move(update.newLayers));
+ mLayerLifecycleManager.applyTransactions(update.transactions);
+ mLayerLifecycleManager.onHandlesDestroyed(update.destroyedHandles);
+ for (auto& legacyLayer : update.layerCreatedStates) {
+ sp<Layer> layer = legacyLayer.layer.promote();
+ if (layer) {
+ mLegacyLayers[layer->sequence] = layer;
+ }
+ }
+ }
+ if (mLayerLifecycleManager.getGlobalChanges().test(Changes::Hierarchy)) {
+ ATRACE_NAME("LayerHierarchyBuilder:update");
+ mLayerHierarchyBuilder.update(mLayerLifecycleManager.getLayers(),
+ mLayerLifecycleManager.getDestroyedLayers());
+ }
+
+ applyAndCommitDisplayTransactionStates(update.transactions);
+
+ {
+ ATRACE_NAME("LayerSnapshotBuilder:update");
+ frontend::LayerSnapshotBuilder::Args args{.root = mLayerHierarchyBuilder.getHierarchy(),
+ .layerLifecycleManager = mLayerLifecycleManager,
+ .displays = mFrontEndDisplayInfos,
+ .displayChanges = mFrontEndDisplayInfosChanged,
+ .globalShadowSettings =
+ mDrawingState.globalShadowSettings,
+ .supportsBlur = mSupportsBlur,
+ .forceFullDamage = mForceFullDamage};
+ mLayerSnapshotBuilder.update(args);
+ }
+
+ if (mLayerLifecycleManager.getGlobalChanges().any(Changes::Geometry | Changes::Input |
+ Changes::Hierarchy)) {
+ mUpdateInputInfo = true;
+ }
+ if (mLayerLifecycleManager.getGlobalChanges().any(Changes::VisibleRegion | Changes::Hierarchy |
+ Changes::Visibility)) {
+ mVisibleRegionsDirty = true;
+ }
+ outTransactionsAreEmpty = mLayerLifecycleManager.getGlobalChanges().get() == 0;
+ const bool mustComposite = mLayerLifecycleManager.getGlobalChanges().get() != 0;
+ {
+ ATRACE_NAME("LLM:commitChanges");
+ mLayerLifecycleManager.commitChanges();
+ }
+
+ if (!mLegacyFrontEndEnabled) {
+ ATRACE_NAME("DisplayCallbackAndStatsUpdates");
+ applyTransactions(update.transactions, vsyncId);
+
+ bool newDataLatched = false;
+ for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
+ if (!snapshot->changes.test(Changes::Buffer)) continue;
+ auto it = mLegacyLayers.find(snapshot->sequence);
+ LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldn't find layer object for %s",
+ snapshot->getDebugString().c_str());
+ mLayersWithQueuedFrames.emplace(it->second);
+ newDataLatched = true;
+ if (!snapshot->isVisible) break;
+
+ Region visibleReg;
+ visibleReg.set(snapshot->transformedBoundsWithoutTransparentRegion);
+ invalidateLayerStack(snapshot->outputFilter, visibleReg);
+ }
+
+ for (auto& destroyedLayer : mLayerLifecycleManager.getDestroyedLayers()) {
+ mLegacyLayers.erase(destroyedLayer->id);
+ }
+
+ // enter boot animation on first buffer latch
+ if (CC_UNLIKELY(mBootStage == BootStage::BOOTLOADER && newDataLatched)) {
+ ALOGI("Enter boot animation");
+ mBootStage = BootStage::BOOTANIMATION;
+ }
+ commitTransactions();
+ }
+ return mustComposite;
+}
+
bool SurfaceFlinger::commit(TimePoint frameTime, VsyncId vsyncId, TimePoint expectedVsyncTime)
FTL_FAKE_GUARD(kMainThreadContext) {
// The expectedVsyncTime, which was predicted when this frame was scheduled, is normally in the
@@ -2275,45 +2386,34 @@
mFrameTimeline->setSfWakeUp(vsyncId.value, frameTime.ns(),
Fps::fromPeriodNsecs(vsyncPeriod.ns()));
- bool needsTraversal = false;
- if (clearTransactionFlags(eTransactionFlushNeeded)) {
- // Locking:
- // 1. to prevent onHandleDestroyed from being called while the state lock is held,
- // we must keep a copy of the transactions (specifically the composer
- // states) around outside the scope of the lock.
- // 2. Transactions and created layers do not share a lock. To prevent applying
- // transactions with layers still in the createdLayer queue, flush the transactions
- // before committing the created layers.
- std::vector<TransactionState> transactions = mTransactionHandler.flushTransactions();
- needsTraversal |= commitMirrorDisplays(vsyncId);
- needsTraversal |= commitCreatedLayers(vsyncId);
- needsTraversal |= applyTransactions(transactions, vsyncId);
+ const bool flushTransactions = clearTransactionFlags(eTransactionFlushNeeded);
+ LifecycleUpdate updates;
+ if (flushTransactions) {
+ updates = flushLifecycleUpdates();
}
-
- const bool shouldCommit =
- (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
- if (shouldCommit) {
- commitTransactions();
+ bool transactionsAreEmpty;
+ if (mLegacyFrontEndEnabled) {
+ mustComposite |= updateLayerSnapshotsLegacy(vsyncId, updates, flushTransactions,
+ transactionsAreEmpty);
+ }
+ if (mLayerLifecycleManagerEnabled) {
+ mustComposite |=
+ updateLayerSnapshots(vsyncId, updates, flushTransactions, transactionsAreEmpty);
}
if (transactionFlushNeeded()) {
setTransactionFlags(eTransactionFlushNeeded);
}
- mustComposite |= shouldCommit;
- mustComposite |= latchBuffers();
-
// This has to be called after latchBuffers because we want to include the layers that have
// been latched in the commit callback
- if (!needsTraversal) {
+ if (transactionsAreEmpty) {
// Invoke empty transaction callbacks early.
mTransactionCallbackInvoker.sendCallbacks(false /* onCommitOnly */);
} else {
// Invoke OnCommit callbacks.
mTransactionCallbackInvoker.sendCallbacks(true /* onCommitOnly */);
}
-
- updateLayerGeometry();
}
// Layers need to get updated (in the previous line) before we can use them for
@@ -2400,15 +2500,6 @@
refreshArgs.updatingOutputGeometryThisFrame = mVisibleRegionsDirty;
refreshArgs.updatingGeometryThisFrame = mGeometryDirty.exchange(false) || mVisibleRegionsDirty;
- std::vector<Layer*> layers;
-
- mDrawingState.traverseInZOrder([&refreshArgs, &layers](Layer* layer) {
- if (auto layerFE = layer->getCompositionEngineLayerFE()) {
- layer->updateSnapshot(refreshArgs.updatingGeometryThisFrame);
- refreshArgs.layers.push_back(layerFE);
- layers.push_back(layer);
- }
- });
refreshArgs.internalDisplayRotationFlags = DisplayDevice::getPrimaryDisplayRotationFlags();
if (CC_UNLIKELY(mDrawingState.colorMatrixChanged)) {
@@ -2435,17 +2526,13 @@
// the scheduler.
const auto presentTime = systemTime();
- {
- std::vector<LayerSnapshotGuard> layerSnapshotGuards;
- for (Layer* layer : layers) {
- layerSnapshotGuards.emplace_back(layer);
- }
- mCompositionEngine->present(refreshArgs);
- }
+ std::vector<std::pair<Layer*, LayerFE*>> layers =
+ moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/false, vsyncId.value);
+ mCompositionEngine->present(refreshArgs);
+ moveSnapshotsFromCompositionArgs(refreshArgs, layers);
- for (auto& layer : layers) {
- CompositionResult compositionResult{
- layer->getCompositionEngineLayerFE()->stealCompositionResult()};
+ for (auto [layer, layerFE] : layers) {
+ CompositionResult compositionResult{layerFE->stealCompositionResult()};
layer->onPreComposition(compositionResult.refreshStartTime);
for (auto releaseFence : compositionResult.releaseFences) {
layer->onLayerDisplayed(releaseFence);
@@ -2539,7 +2626,7 @@
for (auto& layer : mLayersPendingRefresh) {
Region visibleReg;
visibleReg.set(layer->getScreenBounds());
- invalidateLayerStack(layer, visibleReg);
+ invalidateLayerStack(layer->getOutputFilter(), visibleReg);
}
mLayersPendingRefresh.clear();
}
@@ -3400,7 +3487,8 @@
void SurfaceFlinger::commitTransactionsLocked(uint32_t transactionFlags) {
// Commit display transactions.
const bool displayTransactionNeeded = transactionFlags & eDisplayTransactionNeeded;
- if (displayTransactionNeeded) {
+ mFrontEndDisplayInfosChanged = displayTransactionNeeded;
+ if (displayTransactionNeeded && !mLayerLifecycleManagerEnabled) {
processDisplayChangesLocked();
mFrontEndDisplayInfos.clear();
for (const auto& [_, display] : mDisplays) {
@@ -3491,7 +3579,7 @@
// this layer is not visible anymore
Region visibleReg;
visibleReg.set(layer->getScreenBounds());
- invalidateLayerStack(sp<Layer>::fromExisting(layer), visibleReg);
+ invalidateLayerStack(layer->getOutputFilter(), visibleReg);
}
});
}
@@ -3579,16 +3667,23 @@
outWindowInfos.reserve(sNumWindowInfos);
sNumWindowInfos = 0;
- mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
- if (!layer->needsInputInfo()) return;
+ if (mLayerLifecycleManagerEnabled) {
+ mLayerSnapshotBuilder.forEachInputSnapshot(
+ [&outWindowInfos](const frontend::LayerSnapshot& snapshot) {
+ outWindowInfos.push_back(snapshot.inputInfo);
+ });
+ } else {
+ mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
+ if (!layer->needsInputInfo()) return;
+ const auto opt =
+ mFrontEndDisplayInfos.get(layer->getLayerStack())
+ .transform([](const frontend::DisplayInfo& info) {
+ return Layer::InputDisplayArgs{&info.transform, info.isSecure};
+ });
- const auto opt = mFrontEndDisplayInfos.get(layer->getLayerStack())
- .transform([](const frontend::DisplayInfo& info) {
- return Layer::InputDisplayArgs{&info.transform, info.isSecure};
- });
-
- outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
- });
+ outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
+ });
+ }
sNumWindowInfos = outWindowInfos.size();
@@ -3605,17 +3700,9 @@
refreshArgs.outputs.push_back(display->getCompositionDisplay());
}
}
-
- std::vector<LayerSnapshotGuard> layerSnapshotGuards;
- mDrawingState.traverse([&layerSnapshotGuards](Layer* layer) {
- if (layer->getLayerSnapshot()->compositionType ==
- aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
- layer->updateSnapshot(false /* updateGeometry */);
- layerSnapshotGuards.emplace_back(layer);
- }
- });
-
+ auto layers = moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/true, 0);
mCompositionEngine->updateCursorAsync(refreshArgs);
+ moveSnapshotsFromCompositionArgs(refreshArgs, layers);
}
void SurfaceFlinger::requestDisplayModes(std::vector<display::DisplayModeRequest> modeRequests) {
@@ -3790,10 +3877,10 @@
}
}
-void SurfaceFlinger::invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty) {
+void SurfaceFlinger::invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty) {
for (const auto& [token, displayDevice] : FTL_FAKE_GUARD(mStateLock, mDisplays)) {
auto display = displayDevice->getCompositionDisplay();
- if (display->includesLayer(layer->getOutputFilter())) {
+ if (display->includesLayer(layerFilter)) {
display->editState().dirtyRegion.orSelf(dirty);
}
}
@@ -3913,6 +4000,7 @@
{
std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
mCreatedLayers.emplace_back(layer, parent, args.addToRoot);
+ mNewLayers.emplace_back(std::make_unique<frontend::RequestedLayerState>(args));
}
setTransactionFlags(eTransactionNeeded);
@@ -4266,9 +4354,11 @@
const std::vector<ListenerCallbacks>& listenerCallbacks,
int originPid, int originUid, uint64_t transactionId) {
uint32_t transactionFlags = 0;
- for (DisplayState& display : displays) {
- display.sanitize(permissions);
- transactionFlags |= setDisplayStateLocked(display);
+ if (!mLayerLifecycleManagerEnabled) {
+ for (DisplayState& display : displays) {
+ display.sanitize(permissions);
+ transactionFlags |= setDisplayStateLocked(display);
+ }
}
// start and end registration for listeners w/ no surface so they can get their callback. Note
@@ -4280,9 +4370,16 @@
uint32_t clientStateFlags = 0;
for (auto& resolvedState : states) {
- clientStateFlags |=
- setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
- isAutoTimestamp, postTime, permissions, transactionId);
+ if (mLegacyFrontEndEnabled) {
+ clientStateFlags |=
+ setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
+ isAutoTimestamp, postTime, permissions, transactionId);
+
+ } else /*mLayerLifecycleManagerEnabled*/ {
+ clientStateFlags |= updateLayerCallbacksAndStats(frameTimelineInfo, resolvedState,
+ desiredPresentTime, isAutoTimestamp,
+ postTime, permissions, transactionId);
+ }
if ((flags & eAnimation) && resolvedState.state.surface) {
if (const auto layer = LayerHandle::getLayer(resolvedState.state.surface)) {
using LayerUpdateType = scheduler::LayerHistory::LayerUpdateType;
@@ -4315,8 +4412,8 @@
bool needsTraversal = false;
if (transactionFlags) {
- // We are on the main thread, we are about to preform a traversal. Clear the traversal bit
- // so we don't have to wake up again next frame to preform an unnecessary traversal.
+ // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
+ // so we don't have to wake up again next frame to perform an unnecessary traversal.
if (transactionFlags & eTraversalNeeded) {
transactionFlags = transactionFlags & (~eTraversalNeeded);
needsTraversal = true;
@@ -4329,6 +4426,42 @@
return needsTraversal;
}
+bool SurfaceFlinger::applyAndCommitDisplayTransactionStates(
+ std::vector<TransactionState>& transactions) {
+ Mutex::Autolock _l(mStateLock);
+ bool needsTraversal = false;
+ uint32_t transactionFlags = 0;
+ for (auto& transaction : transactions) {
+ for (DisplayState& display : transaction.displays) {
+ display.sanitize(transaction.permissions);
+ transactionFlags |= setDisplayStateLocked(display);
+ }
+ }
+
+ if (transactionFlags) {
+ // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
+ // so we don't have to wake up again next frame to perform an unnecessary traversal.
+ if (transactionFlags & eTraversalNeeded) {
+ transactionFlags = transactionFlags & (~eTraversalNeeded);
+ needsTraversal = true;
+ }
+ if (transactionFlags) {
+ setTransactionFlags(transactionFlags);
+ }
+ }
+
+ mFrontEndDisplayInfosChanged = mTransactionFlags & eDisplayTransactionNeeded;
+ if (mFrontEndDisplayInfosChanged && !mLegacyFrontEndEnabled) {
+ processDisplayChangesLocked();
+ mFrontEndDisplayInfos.clear();
+ for (const auto& [_, display] : mDisplays) {
+ mFrontEndDisplayInfos.try_emplace(display->getLayerStack(), display->getFrontEndInfo());
+ }
+ }
+
+ return needsTraversal;
+}
+
uint32_t SurfaceFlinger::setDisplayStateLocked(const DisplayState& s) {
const ssize_t index = mCurrentState.displays.indexOfKey(s.token);
if (index < 0) return 0;
@@ -4710,7 +4843,11 @@
s.trustedPresentationListener);
}
- if (layer->setTransactionCompletedListeners(callbackHandles)) flags |= eTraversalNeeded;
+ if (layer->setTransactionCompletedListeners(callbackHandles,
+ layer->willPresentCurrentTransaction())) {
+ flags |= eTraversalNeeded;
+ }
+
// Do not put anything that updates layer state or modifies flags after
// setTransactionCompletedListener
@@ -4723,6 +4860,94 @@
return flags;
}
+uint32_t SurfaceFlinger::updateLayerCallbacksAndStats(const FrameTimelineInfo& frameTimelineInfo,
+ ResolvedComposerState& composerState,
+ int64_t desiredPresentTime,
+ bool isAutoTimestamp, int64_t postTime,
+ uint32_t permissions,
+ uint64_t transactionId) {
+ layer_state_t& s = composerState.state;
+ s.sanitize(permissions);
+ const nsecs_t latchTime = systemTime();
+ bool unused;
+
+ std::vector<ListenerCallbacks> filteredListeners;
+ for (auto& listener : s.listeners) {
+ // Starts a registration but separates the callback ids according to callback type. This
+ // allows the callback invoker to send on latch callbacks earlier.
+ // Note that startRegistration will not re-register if the listener has
+ // already been registered for a prior surface control.
+
+ ListenerCallbacks onCommitCallbacks = listener.filter(CallbackId::Type::ON_COMMIT);
+ if (!onCommitCallbacks.callbackIds.empty()) {
+ filteredListeners.push_back(onCommitCallbacks);
+ }
+
+ ListenerCallbacks onCompleteCallbacks = listener.filter(CallbackId::Type::ON_COMPLETE);
+ if (!onCompleteCallbacks.callbackIds.empty()) {
+ filteredListeners.push_back(onCompleteCallbacks);
+ }
+ }
+
+ const uint64_t what = s.what;
+ uint32_t flags = 0;
+ sp<Layer> layer = nullptr;
+ if (s.surface) {
+ layer = LayerHandle::getLayer(s.surface);
+ } else {
+ // The client may provide us a null handle. Treat it as if the layer was removed.
+ ALOGW("Attempt to set client state with a null layer handle");
+ }
+ if (layer == nullptr) {
+ for (auto& [listener, callbackIds] : s.listeners) {
+ mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
+ sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+ }
+ return 0;
+ }
+ if (what & layer_state_t::eProducerDisconnect) {
+ layer->onDisconnect();
+ }
+ std::optional<nsecs_t> dequeueBufferTimestamp;
+ if (what & layer_state_t::eMetadataChanged) {
+ dequeueBufferTimestamp = s.metadata.getInt64(gui::METADATA_DEQUEUE_TIME);
+ }
+
+ std::vector<sp<CallbackHandle>> callbackHandles;
+ if ((what & layer_state_t::eHasListenerCallbacksChanged) && (!filteredListeners.empty())) {
+ for (auto& [listener, callbackIds] : filteredListeners) {
+ callbackHandles.emplace_back(
+ sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+ }
+ }
+ if (what & layer_state_t::eSidebandStreamChanged) {
+ if (layer->setSidebandStream(s.sidebandStream)) flags |= eTraversalNeeded;
+ }
+ if (what & layer_state_t::eBufferChanged) {
+ if (layer->setBuffer(composerState.externalTexture, *s.bufferData, postTime,
+ desiredPresentTime, isAutoTimestamp, dequeueBufferTimestamp,
+ frameTimelineInfo)) {
+ layer->latchBuffer(unused, latchTime);
+ flags |= eTraversalNeeded;
+ }
+ mLayersWithQueuedFrames.emplace(layer);
+ } else if (frameTimelineInfo.vsyncId != FrameTimelineInfo::INVALID_VSYNC_ID) {
+ layer->setFrameTimelineVsyncForBufferlessTransaction(frameTimelineInfo, postTime);
+ }
+
+ if (what & layer_state_t::eTrustedPresentationInfoChanged) {
+ layer->setTrustedPresentationInfo(s.trustedPresentationThresholds,
+ s.trustedPresentationListener);
+ }
+
+ const auto& snapshot = mLayerSnapshotBuilder.getSnapshot(layer->getSequence());
+ bool willPresentCurrentTransaction =
+ snapshot && (snapshot->hasReadyFrame || snapshot->sidebandStreamHasFrame);
+ if (layer->setTransactionCompletedListeners(callbackHandles, willPresentCurrentTransaction))
+ flags |= eTraversalNeeded;
+ return flags;
+}
+
uint32_t SurfaceFlinger::addInputWindowCommands(const InputWindowCommands& inputWindowCommands) {
bool hasChanges = mInputWindowCommands.merge(inputWindowCommands);
return hasChanges ? eTraversalNeeded : 0;
@@ -4791,6 +5016,7 @@
LayerCreationArgs mirrorArgs(args);
mirrorArgs.flags |= ISurfaceComposerClient::eNoColorFill;
mirrorArgs.addToRoot = true;
+ mirrorArgs.layerStackToMirror = layerStack;
result = createEffectLayer(mirrorArgs, &outResult.handle, &rootMirrorLayer);
outResult.layerId = rootMirrorLayer->sequence;
outResult.layerName = String16(rootMirrorLayer->getDebugName());
@@ -4893,7 +5119,12 @@
setTransactionFlags(eTransactionNeeded);
}
-void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t /* layerId */) {
+void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t layerId) {
+ {
+ std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
+ mDestroyedHandles.emplace_back(layerId);
+ }
+
Mutex::Autolock lock(mStateLock);
markLayerPendingRemovalLocked(layer);
mBufferCountTracker.remove(handle);
@@ -4901,6 +5132,8 @@
if (mTransactionTracing) {
mTransactionTracing->onHandleRemoved(handle);
}
+
+ setTransactionFlags(eTransactionFlushNeeded);
}
void SurfaceFlinger::onInitializeDisplays() {
@@ -6464,10 +6697,15 @@
args.useIdentityTransform, args.captureSecureLayers);
});
- auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
- traverseLayersInLayerStack(layerStack, args.uid, visitor);
- };
- auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ GetLayerSnapshotsFunction getLayerSnapshots;
+ if (mLayerLifecycleManagerEnabled) {
+ getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, args.uid);
+ } else {
+ auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
+ traverseLayersInLayerStack(layerStack, args.uid, visitor);
+ };
+ getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ }
auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize,
args.pixelFormat, args.allowProtected, args.grayscale,
@@ -6501,10 +6739,15 @@
false /* captureSecureLayers */);
});
- auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
- traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
- };
- auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ GetLayerSnapshotsFunction getLayerSnapshots;
+ if (mLayerLifecycleManagerEnabled) {
+ getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, CaptureArgs::UNSET_UID);
+ } else {
+ auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
+ traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
+ };
+ getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ }
if (captureListener == nullptr) {
ALOGE("capture screen must provide a capture listener callback");
@@ -6599,29 +6842,37 @@
return std::make_unique<LayerRenderArea>(*this, parent, crop, reqSize, dataspace,
childrenOnly, args.captureSecureLayers);
});
-
- auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
- parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
- if (!layer->isVisible()) {
- return;
- } else if (args.childrenOnly && layer == parent.get()) {
- return;
- } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
- return;
- }
-
- auto p = sp<Layer>::fromExisting(layer);
- while (p != nullptr) {
- if (excludeLayerIds.count(p->sequence) != 0) {
+ GetLayerSnapshotsFunction getLayerSnapshots;
+ if (mLayerLifecycleManagerEnabled) {
+ FloatRect parentCrop = crop.isEmpty() ? FloatRect(0, 0, reqSize.width, reqSize.height)
+ : crop.toFloatRect();
+ getLayerSnapshots = getLayerSnapshotsForScreenshots(parent->sequence, args.uid,
+ std::move(excludeLayerIds),
+ args.childrenOnly, parentCrop);
+ } else {
+ auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
+ parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
+ if (!layer->isVisible()) {
+ return;
+ } else if (args.childrenOnly && layer == parent.get()) {
+ return;
+ } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
return;
}
- p = p->getParent();
- }
- visitor(layer);
- });
- };
- auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ auto p = sp<Layer>::fromExisting(layer);
+ while (p != nullptr) {
+ if (excludeLayerIds.count(p->sequence) != 0) {
+ return;
+ }
+ p = p->getParent();
+ }
+
+ visitor(layer);
+ });
+ };
+ getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+ }
if (captureListener == nullptr) {
ALOGE("capture screen must provide a capture listener callback");
@@ -7403,24 +7654,18 @@
return true;
}
-bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId) {
- std::vector<LayerCreatedState> createdLayers;
- {
- std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
- createdLayers = std::move(mCreatedLayers);
- mCreatedLayers.clear();
- if (createdLayers.size() == 0) {
- return false;
- }
+bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId,
+ std::vector<LayerCreatedState>& createdLayers) {
+ if (createdLayers.size() == 0) {
+ return false;
}
Mutex::Autolock _l(mStateLock);
for (const auto& createdLayer : createdLayers) {
handleLayerCreatedLocked(createdLayer, vsyncId);
}
- createdLayers.clear();
mLayersAdded = true;
- return true;
+ return mLayersAdded;
}
void SurfaceFlinger::updateLayerMetadataSnapshot() {
@@ -7448,6 +7693,150 @@
});
}
+void SurfaceFlinger::moveSnapshotsFromCompositionArgs(
+ compositionengine::CompositionRefreshArgs& refreshArgs,
+ std::vector<std::pair<Layer*, LayerFE*>>& layers) {
+ if (mLayerLifecycleManagerEnabled) {
+ std::vector<std::unique_ptr<frontend::LayerSnapshot>>& snapshots =
+ mLayerSnapshotBuilder.getSnapshots();
+ for (auto [_, layerFE] : layers) {
+ auto i = layerFE->mSnapshot->globalZ;
+ snapshots[i] = std::move(layerFE->mSnapshot);
+ }
+ }
+ if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
+ for (auto [layer, layerFE] : layers) {
+ layer->updateLayerSnapshot(std::move(layerFE->mSnapshot));
+ }
+ }
+}
+
+std::vector<std::pair<Layer*, LayerFE*>> SurfaceFlinger::moveSnapshotsToCompositionArgs(
+ compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly, int64_t vsyncId) {
+ std::vector<std::pair<Layer*, LayerFE*>> layers;
+ if (mLayerLifecycleManagerEnabled) {
+ mLayerSnapshotBuilder.forEachVisibleSnapshot(
+ [&](std::unique_ptr<frontend::LayerSnapshot>& snapshot) {
+ if (cursorOnly &&
+ snapshot->compositionType !=
+ aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
+ return;
+ }
+
+ if (!snapshot->hasSomethingToDraw()) {
+ return;
+ }
+
+ auto it = mLegacyLayers.find(snapshot->sequence);
+ LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(),
+ "Couldnt find layer object for %s",
+ snapshot->getDebugString().c_str());
+ auto& legacyLayer = it->second;
+ sp<LayerFE> layerFE = legacyLayer->getCompositionEngineLayerFE(snapshot->path);
+ layerFE->mSnapshot = std::move(snapshot);
+ refreshArgs.layers.push_back(layerFE);
+ layers.emplace_back(legacyLayer.get(), layerFE.get());
+ });
+ }
+ if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
+ mDrawingState.traverseInZOrder([&refreshArgs, cursorOnly, &layers](Layer* layer) {
+ if (auto layerFE = layer->getCompositionEngineLayerFE()) {
+ if (cursorOnly &&
+ layer->getLayerSnapshot()->compositionType !=
+ aidl::android::hardware::graphics::composer3::Composition::CURSOR)
+ return;
+ layer->updateSnapshot(/* refreshArgs.updatingGeometryThisFrame */ true);
+ layerFE->mSnapshot = layer->stealLayerSnapshot();
+ refreshArgs.layers.push_back(layerFE);
+ layers.emplace_back(layer, layerFE.get());
+ }
+ });
+ }
+
+ return layers;
+}
+
+std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
+SurfaceFlinger::getLayerSnapshotsForScreenshots(std::optional<ui::LayerStack> layerStack,
+ uint32_t uid) {
+ return [this, layerStack, uid]() {
+ std::vector<std::pair<Layer*, sp<LayerFE>>> layers;
+ for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
+ if (layerStack && snapshot->outputFilter.layerStack != *layerStack) {
+ continue;
+ }
+ if (uid != CaptureArgs::UNSET_UID && snapshot->inputInfo.ownerUid != uid) {
+ continue;
+ }
+ if (!snapshot->isVisible || !snapshot->hasSomethingToDraw()) {
+ continue;
+ }
+
+ auto it = mLegacyLayers.find(snapshot->sequence);
+ LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldnt find layer object for %s",
+ snapshot->getDebugString().c_str());
+ auto& legacyLayer = it->second;
+ sp<LayerFE> layerFE = getFactory().createLayerFE(legacyLayer->getName());
+ layerFE->mSnapshot = std::make_unique<frontend::LayerSnapshot>(*snapshot);
+ layers.emplace_back(legacyLayer.get(), std::move(layerFE));
+ }
+
+ return layers;
+ };
+}
+
+std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
+SurfaceFlinger::getLayerSnapshotsForScreenshots(uint32_t rootLayerId, uint32_t uid,
+ std::unordered_set<uint32_t> excludeLayerIds,
+ bool childrenOnly, const FloatRect& parentCrop) {
+ return [this, excludeLayerIds = std::move(excludeLayerIds), uid, rootLayerId, childrenOnly,
+ parentCrop]() {
+ frontend::LayerSnapshotBuilder::Args
+ args{.root = mLayerHierarchyBuilder.getPartialHierarchy(rootLayerId, childrenOnly),
+ .layerLifecycleManager = mLayerLifecycleManager,
+ .displays = mFrontEndDisplayInfos,
+ .displayChanges = true,
+ .globalShadowSettings = mDrawingState.globalShadowSettings,
+ .supportsBlur = mSupportsBlur,
+ .forceFullDamage = mForceFullDamage,
+ .parentCrop = {parentCrop},
+ .excludeLayerIds = std::move(excludeLayerIds)};
+ mLayerSnapshotBuilder.update(args);
+
+ auto getLayerSnapshotsFn = getLayerSnapshotsForScreenshots({}, uid);
+ std::vector<std::pair<Layer*, sp<LayerFE>>> layers = getLayerSnapshotsFn();
+ args.root = mLayerHierarchyBuilder.getHierarchy();
+ args.parentCrop.reset();
+ args.excludeLayerIds.clear();
+ mLayerSnapshotBuilder.update(args);
+ return layers;
+ };
+}
+
+SurfaceFlinger::LifecycleUpdate SurfaceFlinger::flushLifecycleUpdates() {
+ LifecycleUpdate update;
+ ATRACE_NAME("TransactionHandler:flushTransactions");
+ // Locking:
+ // 1. To prevent onHandleDestroyed from being called while the state lock is
+ // held, we must keep a copy of the transactions (specifically the composer
+ // states) around outside the scope of the lock.
+ // 2. Transactions and created layers do not share a lock. To prevent applying
+ // transactions with layers still in the createdLayer queue, flush the
+ // transactions before committing the created layers.
+ update.transactions = mTransactionHandler.flushTransactions();
+ {
+ // TODO(b/238781169) lockless queue this and keep order.
+ std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
+ update.layerCreatedStates = std::move(mCreatedLayers);
+ mCreatedLayers.clear();
+ update.newLayers = std::move(mNewLayers);
+ mNewLayers.clear();
+ update.destroyedHandles = std::move(mDestroyedHandles);
+ mDestroyedHandles.clear();
+ }
+ return update;
+}
+
// gui::ISurfaceComposer
binder::Status SurfaceComposerAIDL::bootFinished() {
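The locking comment in flushLifecycleUpdates() above carries the key ordering constraint: transactions are drained before the created-layer queue, so every layer a drained transaction refers to is either already committed or part of the same update (which the caller commits before applying the transactions). A standalone sketch of that drain order follows; PendingUpdate and LifecycleQueues are hypothetical names standing in for the real SurfaceFlinger types.

    #include <mutex>
    #include <string>
    #include <utility>
    #include <vector>

    struct Transaction { int id = 0; };
    struct CreatedLayer { std::string name; };

    struct PendingUpdate {
        std::vector<Transaction> transactions;
        std::vector<CreatedLayer> createdLayers;
    };

    class LifecycleQueues {
    public:
        void queueTransaction(Transaction t) {
            std::scoped_lock lock(mTransactionLock);
            mTransactions.push_back(std::move(t));
        }
        void queueCreatedLayer(CreatedLayer l) {
            std::scoped_lock lock(mCreatedLock);
            mCreatedLayers.push_back(std::move(l));
        }

        // Drain everything for one commit. Taking transactions first guarantees
        // that a layer queued for creation before a drained transaction was
        // submitted is either already committed or part of this same update.
        PendingUpdate flush() {
            PendingUpdate update;
            {
                std::scoped_lock lock(mTransactionLock);
                update.transactions = std::move(mTransactions);
                mTransactions.clear();
            }
            {
                std::scoped_lock lock(mCreatedLock);
                update.createdLayers = std::move(mCreatedLayers);
                mCreatedLayers.clear();
            }
            return update;
        }

    private:
        std::mutex mTransactionLock;
        std::mutex mCreatedLock;
        std::vector<Transaction> mTransactions;
        std::vector<CreatedLayer> mCreatedLayers;
    };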
@@ -7461,9 +7850,9 @@
binder::Status SurfaceComposerAIDL::createDisplayEventConnection(
VsyncSource vsyncSource, EventRegistration eventRegistration,
- sp<IDisplayEventConnection>* outConnection) {
+ const sp<IBinder>& layerHandle, sp<IDisplayEventConnection>* outConnection) {
sp<IDisplayEventConnection> conn =
- mFlinger->createDisplayEventConnection(vsyncSource, eventRegistration);
+ mFlinger->createDisplayEventConnection(vsyncSource, eventRegistration, layerHandle);
if (conn == nullptr) {
*outConnection = nullptr;
return binderStatusFromStatusT(BAD_VALUE);
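moveSnapshotsToCompositionArgs() and moveSnapshotsFromCompositionArgs() above lend each layer's snapshot to its composition front end for the duration of a frame and then take it back, keyed by globalZ. The sketch below shows that borrow-and-return ownership pattern with stand-in types (Snapshot, SnapshotStore, FrontEnd are illustrative, not the real classes) and assumes a snapshot's globalZ equals its slot in the store, which is how returned snapshots find their way back.

    #include <algorithm>
    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Snapshot { int globalZ = 0; };

    // Owner of all snapshots; a snapshot's globalZ equals its slot index here.
    struct SnapshotStore {
        std::vector<std::unique_ptr<Snapshot>> snapshots;
    };

    // Per-layer composition object that borrows a snapshot for one frame.
    struct FrontEnd {
        std::unique_ptr<Snapshot> borrowed;
    };

    // Lend each snapshot to its front-end object for the frame.
    std::vector<FrontEnd*> moveOut(SnapshotStore& store, std::vector<FrontEnd>& fes) {
        std::vector<FrontEnd*> active;
        const std::size_t n = std::min(fes.size(), store.snapshots.size());
        for (std::size_t i = 0; i < n; ++i) {
            if (!store.snapshots[i]) continue;
            fes[i].borrowed = std::move(store.snapshots[i]);
            active.push_back(&fes[i]);
        }
        return active;
    }

    // Return each borrowed snapshot to its slot, keyed by globalZ.
    void moveBack(SnapshotStore& store, std::vector<FrontEnd*>& active) {
        for (FrontEnd* fe : active) {
            const std::size_t slot = static_cast<std::size_t>(fe->borrowed->globalZ);
            store.snapshots[slot] = std::move(fe->borrowed);
        }
    }

In the real change the legacy path returns the snapshot to the Layer itself rather than an indexed store, and snapshots with nothing to draw are skipped before being lent out.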
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 1eb1fda..0bd15dc 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -71,7 +71,9 @@
#include "FlagManager.h"
#include "FrontEnd/DisplayInfo.h"
#include "FrontEnd/LayerCreationArgs.h"
+#include "FrontEnd/LayerLifecycleManager.h"
#include "FrontEnd/LayerSnapshot.h"
+#include "FrontEnd/LayerSnapshotBuilder.h"
#include "FrontEnd/TransactionHandler.h"
#include "LayerVector.h"
#include "Scheduler/ISchedulerCallback.h"
@@ -450,6 +452,26 @@
FINISHED,
};
+ struct LayerCreatedState {
+ LayerCreatedState(const wp<Layer>& layer, const wp<Layer>& parent, bool addToRoot)
+ : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
+ wp<Layer> layer;
+ // The initial parent of the created layer; only used when creating a layer in
+ // SurfaceFlinger. If nullptr, the created layer may be added to the current root layers.
+ wp<Layer> initialParent;
+ // Whether the layer being created should be added to the root set when it has no
+ // parent and the caller holds the ACCESS_SURFACE_FLINGER permission. If false and
+ // there is no parent, the layer is added offscreen.
+ bool addToRoot;
+ };
+
+ struct LifecycleUpdate {
+ std::vector<TransactionState> transactions;
+ std::vector<LayerCreatedState> layerCreatedStates;
+ std::vector<std::unique_ptr<frontend::RequestedLayerState>> newLayers;
+ std::vector<uint32_t> destroyedHandles;
+ };
+
template <typename F, std::enable_if_t<!std::is_member_function_pointer_v<F>>* = nullptr>
static Dumper dumper(F&& dump) {
using namespace std::placeholders;
@@ -509,7 +531,8 @@
sp<IDisplayEventConnection> createDisplayEventConnection(
gui::ISurfaceComposer::VsyncSource vsyncSource =
gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
- EventRegistrationFlags eventRegistration = {});
+ EventRegistrationFlags eventRegistration = {},
+ const sp<IBinder>& layerHandle = nullptr);
status_t captureDisplay(const DisplayCaptureArgs&, const sp<IScreenCaptureListener>&);
status_t captureDisplay(DisplayId, const sp<IScreenCaptureListener>&);
@@ -688,6 +711,17 @@
void updateLayerGeometry();
void updateLayerMetadataSnapshot();
+ std::vector<std::pair<Layer*, LayerFE*>> moveSnapshotsToCompositionArgs(
+ compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly,
+ int64_t vsyncId);
+ void moveSnapshotsFromCompositionArgs(compositionengine::CompositionRefreshArgs& refreshArgs,
+ std::vector<std::pair<Layer*, LayerFE*>>& layers);
+ bool updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
+ bool transactionsFlushed, bool& out)
+ REQUIRES(kMainThreadContext);
+ bool updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update, bool transactionsFlushed,
+ bool& out) REQUIRES(kMainThreadContext);
+ LifecycleUpdate flushLifecycleUpdates() REQUIRES(kMainThreadContext);
void updateInputFlinger();
void persistDisplayBrightness(bool needsComposite) REQUIRES(kMainThreadContext);
@@ -715,6 +749,8 @@
bool flushTransactionQueues(VsyncId) REQUIRES(kMainThreadContext);
bool applyTransactions(std::vector<TransactionState>&, VsyncId) REQUIRES(kMainThreadContext);
+ bool applyAndCommitDisplayTransactionStates(std::vector<TransactionState>& transactions)
+ REQUIRES(kMainThreadContext);
// Returns true if there is at least one transaction that needs to be flushed
bool transactionFlushNeeded();
@@ -730,7 +766,10 @@
int64_t desiredPresentTime, bool isAutoTimestamp,
int64_t postTime, uint32_t permissions, uint64_t transactionId)
REQUIRES(mStateLock);
-
+ uint32_t updateLayerCallbacksAndStats(const FrameTimelineInfo&, ResolvedComposerState&,
+ int64_t desiredPresentTime, bool isAutoTimestamp,
+ int64_t postTime, uint32_t permissions,
+ uint64_t transactionId) REQUIRES(mStateLock);
uint32_t getTransactionFlags() const;
// Sets the masked bits, and schedules a commit if needed.
@@ -888,7 +927,7 @@
// mark a region of a layer stack dirty. this updates the dirty
// region of all screens presenting this layer stack.
- void invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty);
+ void invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty);
ui::LayerFilter makeLayerFilterForDisplay(DisplayId displayId, ui::LayerStack layerStack)
REQUIRES(mStateLock) {
@@ -1154,6 +1193,7 @@
// Set if LayerMetadata has changed since the last LayerMetadata snapshot.
bool mLayerMetadataSnapshotNeeded = false;
+ // TODO(b/238781169) validate these on composition
// Tracks layers that have pending frames which are candidates for being
// latched.
std::unordered_set<sp<Layer>, SpHash<Layer>> mLayersWithQueuedFrames;
@@ -1325,23 +1365,11 @@
GUARDED_BY(mStateLock);
mutable std::mutex mCreatedLayersLock;
- struct LayerCreatedState {
- LayerCreatedState(const wp<Layer>& layer, const wp<Layer> parent, bool addToRoot)
- : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
- wp<Layer> layer;
- // Indicates the initial parent of the created layer, only used for creating layer in
- // SurfaceFlinger. If nullptr, it may add the created layer into the current root layers.
- wp<Layer> initialParent;
- // Indicates whether the layer getting created should be added at root if there's no parent
- // and has permission ACCESS_SURFACE_FLINGER. If set to false and no parent, the layer will
- // be added offscreen.
- bool addToRoot;
- };
// A temporary pool that stores the created layers; they will be added to the current
// state on the main thread.
std::vector<LayerCreatedState> mCreatedLayers GUARDED_BY(mCreatedLayersLock);
- bool commitCreatedLayers(VsyncId);
+ bool commitCreatedLayers(VsyncId, std::vector<LayerCreatedState>& createdLayers);
void handleLayerCreatedLocked(const LayerCreatedState&, VsyncId) REQUIRES(mStateLock);
mutable std::mutex mMirrorDisplayLock;
@@ -1363,6 +1391,11 @@
return hasDisplay(
[](const auto& display) { return display.isRefreshRateOverlayEnabled(); });
}
+ std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
+ std::optional<ui::LayerStack> layerStack, uint32_t uid);
+ std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
+ uint32_t rootLayerId, uint32_t uid, std::unordered_set<uint32_t> excludeLayerIds,
+ bool childrenOnly, const FloatRect& parentCrop);
const sp<WindowInfosListenerInvoker> mWindowInfosListenerInvoker;
@@ -1375,6 +1408,18 @@
bool mPowerHintSessionEnabled;
+ bool mLayerLifecycleManagerEnabled = false;
+ bool mLegacyFrontEndEnabled = true;
+
+ frontend::LayerLifecycleManager mLayerLifecycleManager;
+ frontend::LayerHierarchyBuilder mLayerHierarchyBuilder{{}};
+ frontend::LayerSnapshotBuilder mLayerSnapshotBuilder;
+
+ std::vector<uint32_t> mDestroyedHandles;
+ std::vector<std::unique_ptr<frontend::RequestedLayerState>> mNewLayers;
+ // These classes do not store any client state but help with managing transaction callbacks
+ // and stats.
+ std::unordered_map<uint32_t, sp<Layer>> mLegacyLayers;
struct {
bool late = false;
bool early = false;
@@ -1382,6 +1427,7 @@
TransactionHandler mTransactionHandler;
display::DisplayMap<ui::LayerStack, frontend::DisplayInfo> mFrontEndDisplayInfos;
+ bool mFrontEndDisplayInfosChanged = false;
};
class SurfaceComposerAIDL : public gui::BnSurfaceComposer {
@@ -1391,6 +1437,7 @@
binder::Status bootFinished() override;
binder::Status createDisplayEventConnection(
VsyncSource vsyncSource, EventRegistration eventRegistration,
+ const sp<IBinder>& layerHandle,
sp<gui::IDisplayEventConnection>* outConnection) override;
binder::Status createConnection(sp<gui::ISurfaceComposerClient>* outClient) override;
binder::Status createDisplay(const std::string& displayName, bool secure,
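The two getLayerSnapshotsForScreenshots() overloads declared above replace the legacy traverse-in-Z-order lambdas with a filter over prebuilt snapshots: keep a snapshot if it is on the requested layer stack (when one is given), owned by the requested uid (when one is given), visible, and has something to draw. A standalone sketch of that predicate with simplified stand-in types (Snapshot and kUnsetUid are illustrative, not the real symbols):

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Stand-in sentinel for "no uid filter requested"; the real code compares
    // against CaptureArgs::UNSET_UID.
    constexpr uint32_t kUnsetUid = static_cast<uint32_t>(-1);

    struct Snapshot {
        uint32_t layerStack = 0;
        uint32_t ownerUid = 0;
        bool visible = false;
        bool hasSomethingToDraw = false;
    };

    // Select the snapshots a screenshot of (layerStack, uid) should include.
    std::vector<Snapshot> selectForScreenshot(const std::vector<Snapshot>& all,
                                              std::optional<uint32_t> layerStack,
                                              uint32_t uid) {
        std::vector<Snapshot> out;
        for (const auto& s : all) {
            if (layerStack && s.layerStack != *layerStack) continue;
            if (uid != kUnsetUid && s.ownerUid != uid) continue;
            if (!s.visible || !s.hasSomethingToDraw) continue;
            out.push_back(s);
        }
        return out;
    }

The subtree overload additionally rebuilds snapshots for a partial hierarchy with a parent crop and an exclude set, then restores the full hierarchy so the builder state is unchanged once the capture has been serviced.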
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
index 11719c4..c088e7b 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
@@ -148,7 +148,7 @@
layer->fenceHasSignaled();
layer->onPreComposition(mFdp.ConsumeIntegral<int64_t>());
const std::vector<sp<CallbackHandle>> callbacks;
- layer->setTransactionCompletedListeners(callbacks);
+ layer->setTransactionCompletedListeners(callbacks, mFdp.ConsumeBool());
std::shared_ptr<renderengine::ExternalTexture> texture = std::make_shared<
renderengine::mock::FakeExternalTexture>(mFdp.ConsumeIntegral<uint32_t>(),