/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
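
// BlobCache is a bounded, in-memory key/value store for opaque binary blobs.
// Entries are kept sorted by key so lookups can use binary search, oversized
// keys or values are rejected, insertions that would overflow the total budget
// trigger random eviction, and the whole cache can be serialized with
// flatten() and restored with unflatten().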

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <android-base/properties.h>
#include <errno.h>
#include <inttypes.h>
#include <log/log.h>

#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
      : mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}
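
// A minimal construction sketch; the limits below are illustrative values
// chosen for this example, not values mandated by BlobCache:
//
//     BlobCache cache(1024 /* maxKeySize */, 64 * 1024 /* maxValueSize */,
//                     1024 * 1024 /* maxTotalSize */);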

void BlobCache::set(const void* key, size_t keySize, const void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        break;
    }
}
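
// Note that set() copies both the key and the value into cache-owned Blobs, so
// the caller's buffers can be reused or freed as soon as it returns, and that
// entries exceeding the per-key, per-value, or total size limits are dropped
// with only an ALOGV/ALOGW log line.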

size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
    if (index == mCacheEntries.end() || cacheEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
              valueBlobSize);
    }
    return valueBlobSize;
}
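
// A minimal lookup sketch (buffer handling is illustrative): get() returns the
// stored value's size whether or not it copied anything, so callers can probe
// for the size first and then fetch into a correctly sized buffer:
//
//     size_t needed = cache.get(key, keySize, nullptr, 0);
//     std::vector<uint8_t> buf(needed);
//     if (needed > 0 && cache.get(key, keySize, buf.data(), buf.size()) == needed) {
//         // buf now holds the cached value.
//     }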

static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}
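
// align4() rounds a size up to the nearest multiple of 4, e.g. align4(5) == 8
// and align4(8) == 8; the flattened records below are padded to 4-byte
// boundaries with it.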

size_t BlobCache::getFlattenedSize() const {
    auto buildId = base::GetProperty("ro.build.id", "");
    size_t size = align4(sizeof(Header) + buildId.size());
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

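// Flattened layout produced by flatten() and consumed by unflatten(), assuming
// the Header/EntryHeader definitions from BlobCache.h:
//
//     [ Header | build id ]                     padded to a 4-byte boundary
//     [ EntryHeader | key | value | padding ]   one record per entry, padded to
//                                               a 4-byte boundary, padding zeroed
//
// getFlattenedSize() above computes exactly this footprint.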
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return 0;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    auto buildId = base::GetProperty("ro.build.id", "");
    header->mBuildIdLength = buildId.size();
    memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those get written to storage and contribute to the
            // CRC, so zero them to keep the output reproducible.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}
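
// A minimal persistence sketch (storage handling is illustrative; callers own
// the buffer):
//
//     std::vector<uint8_t> buf(cache.getFlattenedSize());
//     if (cache.flatten(buf.data(), buf.size()) == 0) {
//         // ... write buf to stable storage ...
//     }
//     // Later, typically on the same build/device version:
//     cache.unflatten(buf.data(), buf.size());  // 0 on success or version skip, -EINVAL on corruption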

long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    // Remove random cache entries until the total cache size drops to at most
    // half of the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}
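
// Eviction is uniformly random rather than LRU: clean() keeps discarding random
// entries until the cache occupies at most half of mMaxTotalSize, and
// isCleanable() below reports whether such a pass would remove anything.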

bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}
305
Yiwei Zhang8af03062020-08-12 21:28:15 -0700306BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
307 : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
Yi Kong48a6cd22018-07-18 10:07:09 -0700308 if (data != nullptr && copyData) {
Mathias Agopian5f549b22017-03-08 22:27:13 -0800309 memcpy(const_cast<void*>(mData), data, size);
310 }
311}
312
313BlobCache::Blob::~Blob() {
314 if (mOwnsData) {
315 free(const_cast<void*>(mData));
316 }
317}

bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}
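
// Blobs order by size first and fall back to memcmp() only for equal sizes;
// this total order keeps mCacheEntries sorted for the std::lower_bound()
// lookups in set() and get().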

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android