Update ShaderCache to support shared locking
Currently, the ShaderCache can hold its lock for a long time while
writing the cache to disk, blocking Vulkan's attempts to access it and
causing frame drops. However, neither operation actually requires write
access. By using shared locks, both operations can run concurrently,
eliminating this contention and avoiding the dropped frames.
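Sketch of the idea (illustrative only; this standalone example uses
std::shared_mutex from the standard library rather than ftl::SharedMutex,
and the names are hypothetical): the disk write and the per-frame check
both only read state, so they can hold a shared lock at the same time,
while cache mutations still take an exclusive lock.

  #include <shared_mutex>
  #include <vector>

  std::shared_mutex gMutex;    // stands in for ShaderCache::mMutex
  std::vector<int> gEntries;   // stands in for the in-memory blob cache

  void writeCacheToDisk() {
      // Serializing the cache only reads the in-memory contents, so a
      // shared lock suffices and does not block other readers.
      std::shared_lock lock(gMutex);
      for (int e : gEntries) { (void)e; /* ... write entry ... */ }
  }

  void onFrameFlushed() {
      // The per-frame check also only reads state, so it can proceed
      // even while writeCacheToDisk() holds its shared lock.
      std::shared_lock lock(gMutex);
      // ... inspect flags / sizes ...
  }

  void store(int value) {
      // Mutating the cache still requires exclusive ownership.
      std::unique_lock lock(gMutex);
      gEntries.push_back(value);
  }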
Test: atest hwui_unit_tests:ShaderCacheTest
Bug: 288252750
Change-Id: I26c5bee001d44cdd75766e9cb5974bd9337819ab
diff --git a/libs/hwui/pipeline/skia/ShaderCache.cpp b/libs/hwui/pipeline/skia/ShaderCache.cpp
index 00919dc..f71e728 100644
--- a/libs/hwui/pipeline/skia/ShaderCache.cpp
+++ b/libs/hwui/pipeline/skia/ShaderCache.cpp
@@ -79,7 +79,7 @@
void ShaderCache::initShaderDiskCache(const void* identity, ssize_t size) {
ATRACE_NAME("initShaderDiskCache");
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
// Emulators can switch between different renderers either as part of config
// or snapshot migration. Also, program binaries may not work well on some
@@ -92,7 +92,7 @@
}
void ShaderCache::setFilename(const char* filename) {
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
mFilename = filename;
}
@@ -104,7 +104,7 @@
sk_sp<SkData> ShaderCache::load(const SkData& key) {
ATRACE_NAME("ShaderCache::load");
size_t keySize = key.size();
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
if (!mInitialized) {
return nullptr;
}
@@ -181,13 +181,18 @@
auto key = sIDKey;
set(mBlobCache.get(), &key, sizeof(key), mIDHash.data(), mIDHash.size());
}
+ // Briefly trade the exclusive lock for a shared one while writing to disk
+ mMutex.unlock();
+ mMutex.lock_shared();
mBlobCache->writeToFile();
+ mMutex.unlock_shared();
+ mMutex.lock();
}
}
void ShaderCache::store(const SkData& key, const SkData& data, const SkString& /*description*/) {
ATRACE_NAME("ShaderCache::store");
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
mNumShadersCachedInRam++;
ATRACE_FORMAT("HWUI RAM cache: %d shaders", mNumShadersCachedInRam);
@@ -229,7 +234,7 @@
mSavePending = true;
std::thread deferredSaveThread([this]() {
usleep(mDeferredSaveDelayMs * 1000); // milliseconds to microseconds
- std::lock_guard<std::mutex> lock(mMutex);
+ std::lock_guard lock(mMutex);
// Store file on disk if there is a new shader or the Vulkan pipeline cache size changed.
if (mCacheDirty || mNewPipelineCacheSize != mOldPipelineCacheSize) {
saveToDiskLocked();
@@ -245,11 +250,12 @@
void ShaderCache::onVkFrameFlushed(GrDirectContext* context) {
{
- std::lock_guard<std::mutex> lock(mMutex);
-
+ mMutex.lock_shared();
if (!mInitialized || !mTryToStorePipelineCache) {
+ mMutex.unlock_shared();
return;
}
+ mMutex.unlock_shared();
}
mInStoreVkPipelineInProgress = true;
context->storeVkPipelineCacheData();
diff --git a/libs/hwui/pipeline/skia/ShaderCache.h b/libs/hwui/pipeline/skia/ShaderCache.h
index f5506d6..2f91c77 100644
--- a/libs/hwui/pipeline/skia/ShaderCache.h
+++ b/libs/hwui/pipeline/skia/ShaderCache.h
@@ -19,8 +19,10 @@
#include <GrContextOptions.h>
#include <SkRefCnt.h>
#include <cutils/compiler.h>
+#include <ftl/shared_mutex.h>
+#include <utils/Mutex.h>
+
#include <memory>
-#include <mutex>
#include <string>
#include <vector>
@@ -99,20 +101,20 @@
* this will do so, loading the serialized cache contents from disk if
* possible.
*/
- BlobCache* getBlobCacheLocked();
+ BlobCache* getBlobCacheLocked() REQUIRES(mMutex);
/**
* "validateCache" updates the cache to match the given identity. If the
* cache currently has the wrong identity, all entries in the cache are cleared.
*/
- bool validateCache(const void* identity, ssize_t size);
+ bool validateCache(const void* identity, ssize_t size) REQUIRES(mMutex);
/**
- * "saveToDiskLocked" attemps to save the current contents of the cache to
+ * "saveToDiskLocked" attempts to save the current contents of the cache to
* disk. If the identity hash exists, we will insert the identity hash into
* the cache for next validation.
*/
- void saveToDiskLocked();
+ void saveToDiskLocked() REQUIRES(mMutex);
/**
* "mInitialized" indicates whether the ShaderCache is in the initialized
@@ -122,7 +124,7 @@
* the load and store methods will return without performing any cache
* operations.
*/
- bool mInitialized = false;
+ bool mInitialized GUARDED_BY(mMutex) = false;
/**
* "mBlobCache" is the cache in which the key/value blob pairs are stored. It
@@ -131,7 +133,7 @@
* The blob cache contains the Android build number. We treat version mismatches as an empty
* cache (logic implemented in BlobCache::unflatten).
*/
- std::unique_ptr<FileBlobCache> mBlobCache;
+ std::unique_ptr<FileBlobCache> mBlobCache GUARDED_BY(mMutex);
/**
* "mFilename" is the name of the file for storing cache contents in between
@@ -140,7 +142,7 @@
* empty string indicates that the cache should not be saved to or restored
* from disk.
*/
- std::string mFilename;
+ std::string mFilename GUARDED_BY(mMutex);
/**
* "mIDHash" is the current identity hash for the cache validation. It is
@@ -149,7 +151,7 @@
* indicates that cache validation is not performed, and the hash should
* not be stored on disk.
*/
- std::vector<uint8_t> mIDHash;
+ std::vector<uint8_t> mIDHash GUARDED_BY(mMutex);
/**
* "mSavePending" indicates whether or not a deferred save operation is
@@ -159,7 +161,7 @@
* contents to disk, unless mDeferredSaveDelayMs is 0 in which case saving
* is disabled.
*/
- bool mSavePending = false;
+ bool mSavePending GUARDED_BY(mMutex) = false;
/**
* "mObservedBlobValueSize" is the maximum value size observed by the cache reading function.
@@ -174,16 +176,16 @@
unsigned int mDeferredSaveDelayMs = 4 * 1000;
/**
- * "mMutex" is the mutex used to prevent concurrent access to the member
+ * "mMutex" is the shared mutex used to prevent concurrent access to the member
* variables. It must be locked whenever the member variables are accessed.
*/
- mutable std::mutex mMutex;
+ mutable ftl::SharedMutex mMutex;
/**
* If set to "true", the next call to onVkFrameFlushed, will invoke
* GrCanvas::storeVkPipelineCacheData. This does not guarantee that data will be stored on disk.
*/
- bool mTryToStorePipelineCache = true;
+ bool mTryToStorePipelineCache GUARDED_BY(mMutex) = true;
/**
* This flag is used by "ShaderCache::store" to distinguish between shader data and
@@ -195,16 +197,16 @@
* "mNewPipelineCacheSize" has the size of the new Vulkan pipeline cache data. It is used
* to prevent unnecessary disk writes, if the pipeline cache size has not changed.
*/
- size_t mNewPipelineCacheSize = -1;
+ size_t mNewPipelineCacheSize GUARDED_BY(mMutex) = -1;
/**
* "mOldPipelineCacheSize" has the size of the Vulkan pipeline cache data stored on disk.
*/
- size_t mOldPipelineCacheSize = -1;
+ size_t mOldPipelineCacheSize GUARDED_BY(mMutex) = -1;
/**
* "mCacheDirty" is true when there is new shader cache data, which is not saved to disk.
*/
- bool mCacheDirty = false;
+ bool mCacheDirty GUARDED_BY(mMutex) = false;
/**
* "sCache" is the singleton ShaderCache object.
@@ -221,7 +223,7 @@
* interesting to keep track of how many shaders are stored in RAM. This
* class provides a convenient entry point for that.
*/
- int mNumShadersCachedInRam = 0;
+ int mNumShadersCachedInRam GUARDED_BY(mMutex) = 0;
friend class ShaderCacheTestUtils; // used for unit testing
};