/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ShaderCache.h"

#include <SkData.h>
#include <gui/TraceUtils.h>
#include <include/gpu/ganesh/GrDirectContext.h>
#include <log/log.h>
#include <openssl/sha.h>

#include <algorithm>
#include <array>
#include <mutex>
#include <thread>

#include "FileBlobCache.h"
#include "Properties.h"

namespace android {
namespace uirenderer {
namespace skiapipeline {

// Cache size limits.
static const size_t maxKeySize = 1024;
static const size_t maxValueSize = 2 * 1024 * 1024;
static const size_t maxTotalSize = 4 * 1024 * 1024;
static_assert(maxKeySize + maxValueSize < maxTotalSize);

ShaderCache::ShaderCache() {
    // Defined here rather than in the header: FileBlobCache is an incomplete type there,
    // which would cause a compilation error.
}

ShaderCache ShaderCache::sCache;

ShaderCache& ShaderCache::get() {
    return sCache;
}

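// Hashes the caller-supplied identity blob with SHA-256 and compares it against the hash
// previously stored in the cache under sIDKey. Any mismatch (e.g. after a GPU driver update)
// clears the cache so entries produced under a different identity are discarded.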
bool ShaderCache::validateCache(const void* identity, ssize_t size) {
    if (nullptr == identity && size == 0) return true;

    if (nullptr == identity || size < 0) {
        if (CC_UNLIKELY(Properties::debugLevel & kDebugCaches)) {
            ALOGW("ShaderCache::validateCache invalid cache identity");
        }
        mBlobCache->clear();
        return false;
    }

    SHA256_CTX ctx;
    SHA256_Init(&ctx);

    SHA256_Update(&ctx, identity, size);
    mIDHash.resize(SHA256_DIGEST_LENGTH);
    SHA256_Final(mIDHash.data(), &ctx);

    std::array<uint8_t, SHA256_DIGEST_LENGTH> hash;
    auto key = sIDKey;
    auto loaded = mBlobCache->get(&key, sizeof(key), hash.data(), hash.size());

    if (loaded && std::equal(hash.begin(), hash.end(), mIDHash.begin())) return true;

    if (CC_UNLIKELY(Properties::debugLevel & kDebugCaches)) {
        ALOGW("ShaderCache::validateCache cache validation fails");
    }
    mBlobCache->clear();
    return false;
}

void ShaderCache::initShaderDiskCache(const void* identity, ssize_t size) {
    ATRACE_NAME("initShaderDiskCache");
    std::lock_guard lock(mMutex);

    // Emulators can switch between different renderers, either as part of config
    // or snapshot migration. Also, program binaries may not work well on some
    // desktop / laptop GPUs. Thus, disable the shader disk cache for emulator builds.
    if (!Properties::runningInEmulator && mFilename.length() > 0) {
        mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, maxTotalSize, mFilename));
        validateCache(identity, size);
        mInitialized = true;
        if (identity != nullptr && size > 0 && mIDHash.size()) {
            set(&sIDKey, sizeof(sIDKey), mIDHash.data(), mIDHash.size());
        }
    }
}

void ShaderCache::setFilename(const char* filename) {
    std::lock_guard lock(mMutex);
    mFilename = filename;
}

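// Returns the cached blob for |key|, or nullptr if the cache has not been initialized or
// holds no matching entry. The returned SkData takes ownership of the malloc'd buffer.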
sk_sp<SkData> ShaderCache::load(const SkData& key) {
    ATRACE_NAME("ShaderCache::load");
    size_t keySize = key.size();
    std::lock_guard lock(mMutex);
    if (!mInitialized) {
        return nullptr;
    }

    // mObservedBlobValueSize is kept reasonably large to avoid repeated reallocation.
    // Allocate the buffer with malloc; SkData takes ownership of the allocation and will
    // call free() on it.
    void* valueBuffer = malloc(mObservedBlobValueSize);
    if (!valueBuffer) {
        return nullptr;
    }
    size_t valueSize = mBlobCache->get(key.data(), keySize, valueBuffer, mObservedBlobValueSize);
    int maxTries = 3;
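    // The loop below relies on BlobCache::get() reporting the entry's full size when the
    // supplied buffer is too small: grow the buffer (capped at maxValueSize) and retry a
    // bounded number of times.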
    while (valueSize > mObservedBlobValueSize && maxTries > 0) {
        mObservedBlobValueSize = std::min(valueSize, maxValueSize);
        void* newValueBuffer = realloc(valueBuffer, mObservedBlobValueSize);
        if (!newValueBuffer) {
            free(valueBuffer);
            return nullptr;
        }
        valueBuffer = newValueBuffer;
        valueSize = mBlobCache->get(key.data(), keySize, valueBuffer, mObservedBlobValueSize);
        maxTries--;
    }
    if (!valueSize) {
        free(valueBuffer);
        return nullptr;
    }
    if (valueSize > mObservedBlobValueSize) {
        ALOGE("ShaderCache::load value size is too big %d", (int)valueSize);
        free(valueBuffer);
        return nullptr;
    }
    mNumShadersCachedInRam++;
    ATRACE_FORMAT("HWUI RAM cache: %d shaders", mNumShadersCachedInRam);
    return SkData::MakeFromMalloc(valueBuffer, valueSize);
}

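// Thin wrapper around BlobCache::set() that traces each possible outcome. If inserting the
// entry evicted older entries (kDidClean), the identity hash may have been evicted too, so it
// is re-inserted under sIDKey.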
void ShaderCache::set(const void* key, size_t keySize, const void* value, size_t valueSize) {
    switch (mBlobCache->set(key, keySize, value, valueSize)) {
        case BlobCache::InsertResult::kInserted:
            // This is what we expect/hope. It means the cache is large enough.
            return;
        case BlobCache::InsertResult::kDidClean: {
            ATRACE_FORMAT("ShaderCache: evicted an entry to fit {key: %lu value %lu}!", keySize,
                          valueSize);
            if (mIDHash.size()) {
                set(&sIDKey, sizeof(sIDKey), mIDHash.data(), mIDHash.size());
            }
            return;
        }
        case BlobCache::InsertResult::kNotEnoughSpace: {
            ATRACE_FORMAT("ShaderCache: could not fit {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
        case BlobCache::InsertResult::kInvalidValueSize:
        case BlobCache::InsertResult::kInvalidKeySize: {
            ATRACE_FORMAT("ShaderCache: invalid size {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
        case BlobCache::InsertResult::kKeyTooBig:
        case BlobCache::InsertResult::kValueTooBig:
        case BlobCache::InsertResult::kCombinedTooBig: {
            ATRACE_FORMAT("ShaderCache: entry too big: {key: %lu value %lu}!", keySize, valueSize);
            return;
        }
    }
}

void ShaderCache::saveToDiskLocked() {
    ATRACE_NAME("ShaderCache::saveToDiskLocked");
    if (mInitialized && mBlobCache) {
        // Temporarily hold the mutex in shared mode while writing the file; this is the most
        // straightforward way to make ownership shared for the duration of the write.
        mMutex.unlock();
        mMutex.lock_shared();
        mBlobCache->writeToFile();
        mMutex.unlock_shared();
        mMutex.lock();
    }
}

void ShaderCache::store(const SkData& key, const SkData& data, const SkString& /*description*/) {
    ATRACE_NAME("ShaderCache::store");
    std::lock_guard lock(mMutex);
    mNumShadersCachedInRam++;
    ATRACE_FORMAT("HWUI RAM cache: %d shaders", mNumShadersCachedInRam);

    if (!mInitialized) {
        return;
    }

    size_t valueSize = data.size();
    size_t keySize = key.size();
    if (keySize == 0 || valueSize == 0 || valueSize >= maxValueSize) {
        ALOGW("ShaderCache::store: sizes %d %d not allowed", (int)keySize, (int)valueSize);
        return;
    }

    const void* value = data.data();

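    // Vulkan pipeline-cache bookkeeping. When onVkFrameFlushed() asks Skia to serialize its
    // pipeline cache, that data arrives back through this function with
    // mInStoreVkPipelineInProgress set; the size comparison below avoids rewriting the file
    // when the pipeline cache size has not changed since the last save.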
    if (mInStoreVkPipelineInProgress) {
        if (mOldPipelineCacheSize == -1) {
            // Record the initial pipeline cache size stored in the file.
            mOldPipelineCacheSize = mBlobCache->get(key.data(), keySize, nullptr, 0);
        }
        if (mNewPipelineCacheSize != -1 && mNewPipelineCacheSize == valueSize) {
            // The pipeline cache size has not changed. Stop trying to save.
            mTryToStorePipelineCache = false;
            return;
        }
        mNewPipelineCacheSize = valueSize;
    } else {
        mCacheDirty = true;
        // If new shaders were compiled, we probably have new pipeline state too.
        // Store the pipeline cache on the next flush.
        mNewPipelineCacheSize = -1;
        mTryToStorePipelineCache = true;
    }
    set(key.data(), keySize, value, valueSize);

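    // Disk writes are deferred and coalesced: the first store() while no save is pending
    // schedules a single detached thread that sleeps for mDeferredSaveDelayMs and then writes
    // the file once, and only if something actually changed.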
    if (!mSavePending && mDeferredSaveDelayMs > 0) {
        mSavePending = true;
        std::thread deferredSaveThread([this]() {
            usleep(mDeferredSaveDelayMs * 1000);  // milliseconds to microseconds
            std::lock_guard lock(mMutex);
            // Write the file to disk if there is a new shader or the Vulkan pipeline cache
            // size has changed.
            if (mCacheDirty || mNewPipelineCacheSize != mOldPipelineCacheSize) {
                saveToDiskLocked();
                mOldPipelineCacheSize = mNewPipelineCacheSize;
                mTryToStorePipelineCache = false;
                mCacheDirty = false;
            }
            mSavePending = false;
        });
        deferredSaveThread.detach();
    }
}

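// Called after a Vulkan frame is flushed. If the cache is initialized and a pipeline-cache
// save is pending, ask Skia to serialize its VkPipelineCache; the serialized data is expected
// to come back through store() (see the mInStoreVkPipelineInProgress handling there).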
void ShaderCache::onVkFrameFlushed(GrDirectContext* context) {
    {
        mMutex.lock_shared();
        if (!mInitialized || !mTryToStorePipelineCache) {
            mMutex.unlock_shared();
            return;
        }
        mMutex.unlock_shared();
    }
    mInStoreVkPipelineInProgress = true;
    context->storeVkPipelineCacheData();
    mInStoreVkPipelineInProgress = false;
}

} /* namespace skiapipeline */
} /* namespace uirenderer */
} /* namespace android */