/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include "BlobCache.h"

#include <android-base/properties.h>
#include <errno.h>
#include <inttypes.h>
#include <log/log.h>
#include <stdlib.h>
#include <string.h>
#include <utils/Trace.h>

#include <algorithm>
#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

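// The random state seeded here drives blob_random(), which clean() uses to
// pick eviction victims at random.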
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
      : mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}

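// Inserts or updates a key/value pair. Oversized or empty keys/values are
// rejected up front; if the pair does not fit, the cache is cleaned and the
// insertion retried, and kNotEnoughSpace is returned only when even a cleaned
// cache cannot hold the pair.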
BlobCache::InsertResult BlobCache::set(const void* key, size_t keySize, const void* value,
                                       size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return InsertResult::kKeyTooBig;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return InsertResult::kValueTooBig;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return InsertResult::kCombinedTooBig;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return InsertResult::kInvalidKeySize;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return InsertResult::kInvalidValueSize;
    }

    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    bool didClean = false;
    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    didClean = true;
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    return InsertResult::kNotEnoughSpace;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        return didClean ? InsertResult::kDidClean : InsertResult::kInserted;
    }
}

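// Returns the size of the value stored for the given key, copying it into the
// caller's buffer only when valueSize is large enough; a return of 0 means the
// key was not found. A typical caller queries the required size first (usage
// sketch, not part of this file):
//     size_t needed = cache.get(key, keySize, nullptr, 0);
//     std::vector<uint8_t> buf(needed);
//     cache.get(key, keySize, buf.data(), buf.size());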
size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
    if (index == mCacheEntries.end() || cacheEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
              valueBlobSize);
    }
    return valueBlobSize;
}

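// Rounds size up to the next multiple of 4 so that records stay 4-byte aligned
// in the flattened buffer.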
static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

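// Returns the number of bytes flatten() needs: the header (including the build
// id) plus one 4-byte-aligned record per cache entry.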
size_t BlobCache::getFlattenedSize() const {
    auto buildId = base::GetProperty("ro.build.id", "");
    size_t size = align4(sizeof(Header) + buildId.size());
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

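// Serializes the cache into the caller's buffer: a Header (magic, versions,
// build id) followed by one EntryHeader + key + value record per entry, each
// padded to a 4-byte boundary. Returns -EINVAL when the buffer cannot hold
// every entry.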
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return 0;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    auto buildId = base::GetProperty("ro.build.id", "");
    header->mBuildIdLength = buildId.size();
    memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the CRC,
            // so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

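// Rebuilds the cache from a buffer previously produced by flatten(). The cache
// is cleared first, so any failure, or a header written by a different version
// or build, leaves it empty rather than partially populated.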
int BlobCache::unflatten(void const* buffer, size_t size) {
    ATRACE_NAME("BlobCache::unflatten");

    // All errors should result in the BlobCache being in an empty state.
    clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

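// Returns a pseudo-random number from the state seeded in the constructor;
// used only to pick eviction victims in clean().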
long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    ATRACE_NAME("BlobCache::clean");

    // Remove a random cache entry until the total cache size gets below half
    // the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}

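// The cache is cleanable only while it is more than half full; clean() stops
// evicting once the total size drops to half of mMaxTotalSize.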
bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}

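// When copyData is true the blob allocates and owns a copy of the data; when it
// is false (as for the temporary lookup keys built in set() and get()) it only
// aliases the caller's memory.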
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
      : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
    if (data != nullptr && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android