/*
 ** Copyright 2011, The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
 ** You may obtain a copy of the License at
 **
 **     http://www.apache.org/licenses/LICENSE-2.0
 **
 ** Unless required by applicable law or agreed to in writing, software
 ** distributed under the License is distributed on an "AS IS" BASIS,
 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ** See the License for the specific language governing permissions and
 ** limitations under the License.
 */

//#define LOG_NDEBUG 0

#include "BlobCache.h"

#include <android-base/properties.h>
#include <errno.h>
#include <inttypes.h>
#include <log/log.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <chrono>

namespace android {

// BlobCache::Header::mMagicNumber value
static const uint32_t blobCacheMagic = ('_' << 24) + ('B' << 16) + ('b' << 8) + '$';

// BlobCache::Header::mBlobCacheVersion value
static const uint32_t blobCacheVersion = 3;

// BlobCache::Header::mDeviceVersion value
static const uint32_t blobCacheDeviceVersion = 1;

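// Illustrative usage sketch. The buffer names and size limits below are
// placeholders, not values prescribed by this class:
//
//     BlobCache cache(1024 /* maxKeySize */, 64 * 1024 /* maxValueSize */,
//                     2 * 1024 * 1024 /* maxTotalSize */);
//     cache.set(key, keySize, value, valueSize);
//     size_t needed = cache.get(key, keySize, outBuf, outBufSize);
//     // needed == 0 means no entry was found; needed > outBufSize means the
//     // caller's buffer was too small and nothing was copied.
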
BlobCache::BlobCache(size_t maxKeySize, size_t maxValueSize, size_t maxTotalSize)
      : mMaxTotalSize(maxTotalSize),
        mMaxKeySize(maxKeySize),
        mMaxValueSize(maxValueSize),
        mTotalSize(0) {
    int64_t now = std::chrono::steady_clock::now().time_since_epoch().count();
#ifdef _WIN32
    srand(now);
#else
    mRandState[0] = (now >> 0) & 0xFFFF;
    mRandState[1] = (now >> 16) & 0xFFFF;
    mRandState[2] = (now >> 32) & 0xFFFF;
#endif
    ALOGV("initializing random seed using %lld", (long long)now);
}

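// set() stores a copy of the key/value pair. Entries whose key, value, or
// combined size exceed the configured limits are rejected, as are empty keys
// or values. If adding (or replacing) an entry would push the total size past
// mMaxTotalSize, clean() evicts random entries until roughly half the budget
// is free and the insertion is retried.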
void BlobCache::set(const void* key, size_t keySize, const void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("set: not caching because the key is too large: %zu (limit: %zu)", keySize,
              mMaxKeySize);
        return;
    }
    if (mMaxValueSize < valueSize) {
        ALOGV("set: not caching because the value is too large: %zu (limit: %zu)", valueSize,
              mMaxValueSize);
        return;
    }
    if (mMaxTotalSize < keySize + valueSize) {
        ALOGV("set: not caching because the combined key/value size is too "
              "large: %zu (limit: %zu)",
              keySize + valueSize, mMaxTotalSize);
        return;
    }
    if (keySize == 0) {
        ALOGW("set: not caching because keySize is 0");
        return;
    }
    if (valueSize == 0) {
        ALOGW("set: not caching because valueSize is 0");
        return;
    }

    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);

    while (true) {
        auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
        if (index == mCacheEntries.end() || cacheEntry < *index) {
            // Create a new cache entry.
            std::shared_ptr<Blob> keyBlob(new Blob(key, keySize, true));
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            size_t newTotalSize = mTotalSize + keySize + valueSize;
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new key/value pair because the "
                          "total cache size limit would be exceeded: %zu "
                          "(limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            mCacheEntries.insert(index, CacheEntry(keyBlob, valueBlob));
            mTotalSize = newTotalSize;
            ALOGV("set: created new cache entry with %zu byte key and %zu byte value", keySize,
                  valueSize);
        } else {
            // Update the existing cache entry.
            std::shared_ptr<Blob> valueBlob(new Blob(value, valueSize, true));
            std::shared_ptr<Blob> oldValueBlob(index->getValue());
            size_t newTotalSize = mTotalSize + valueSize - oldValueBlob->getSize();
            if (mMaxTotalSize < newTotalSize) {
                if (isCleanable()) {
                    // Clean the cache and try again.
                    clean();
                    continue;
                } else {
                    ALOGV("set: not caching new value because the total cache "
                          "size limit would be exceeded: %zu (limit: %zu)",
                          keySize + valueSize, mMaxTotalSize);
                    break;
                }
            }
            index->setValue(valueBlob);
            mTotalSize = newTotalSize;
            ALOGV("set: updated existing cache entry with %zu byte key and %zu byte "
                  "value",
                  keySize, valueSize);
        }
        break;
    }
}

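// get() looks up a key and returns the size of the cached value, or 0 if no
// entry exists (or the key exceeds mMaxKeySize). The value bytes are copied
// into the caller's buffer only when valueSize is large enough to hold them;
// otherwise the caller can use the return value to allocate a larger buffer
// and call get() again.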
size_t BlobCache::get(const void* key, size_t keySize, void* value, size_t valueSize) {
    if (mMaxKeySize < keySize) {
        ALOGV("get: not searching because the key is too large: %zu (limit %zu)", keySize,
              mMaxKeySize);
        return 0;
    }
    std::shared_ptr<Blob> cacheKey(new Blob(key, keySize, false));
    CacheEntry cacheEntry(cacheKey, nullptr);
    auto index = std::lower_bound(mCacheEntries.begin(), mCacheEntries.end(), cacheEntry);
    if (index == mCacheEntries.end() || cacheEntry < *index) {
        ALOGV("get: no cache entry found for key of size %zu", keySize);
        return 0;
    }

    // The key was found. Return the value if the caller's buffer is large
    // enough.
    std::shared_ptr<Blob> valueBlob(index->getValue());
    size_t valueBlobSize = valueBlob->getSize();
    if (valueBlobSize <= valueSize) {
        ALOGV("get: copying %zu bytes to caller's buffer", valueBlobSize);
        memcpy(value, valueBlob->getData(), valueBlobSize);
    } else {
        ALOGV("get: caller's buffer is too small for value: %zu (needs %zu)", valueSize,
              valueBlobSize);
    }
    return valueBlobSize;
}

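// Rounds size up to the next multiple of 4. Every record in the flattened
// representation is kept 4-byte aligned, with zero padding where necessary.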
static inline size_t align4(size_t size) {
    return (size + 3) & ~3;
}

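// Layout of the flattened cache produced by flatten() and consumed by
// unflatten():
//
//     Header | build id string | zero padding to a 4-byte boundary
//     EntryHeader | key bytes | value bytes | zero padding to a 4-byte
//     boundary, repeated Header::mNumEntries times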
size_t BlobCache::getFlattenedSize() const {
    auto buildId = base::GetProperty("ro.build.id", "");
    size_t size = align4(sizeof(Header) + buildId.size());
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize());
    }
    return size;
}

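// flatten() serializes the cache into the caller-provided buffer, which should
// be at least getFlattenedSize() bytes. The current build id is embedded in
// the header so that unflatten() can discard caches written by a different
// build.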
int BlobCache::flatten(void* buffer, size_t size) const {
    // Write the cache header
    if (size < sizeof(Header)) {
        ALOGE("flatten: not enough room for cache header");
        return 0;
    }
    Header* header = reinterpret_cast<Header*>(buffer);
    header->mMagicNumber = blobCacheMagic;
    header->mBlobCacheVersion = blobCacheVersion;
    header->mDeviceVersion = blobCacheDeviceVersion;
    header->mNumEntries = mCacheEntries.size();
    auto buildId = base::GetProperty("ro.build.id", "");
    header->mBuildIdLength = buildId.size();
    memcpy(header->mBuildId, buildId.c_str(), header->mBuildIdLength);

    // Write cache entries
    uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    for (const CacheEntry& e : mCacheEntries) {
        std::shared_ptr<Blob> const& keyBlob = e.getKey();
        std::shared_ptr<Blob> const& valueBlob = e.getValue();
        size_t keySize = keyBlob->getSize();
        size_t valueSize = valueBlob->getSize();

        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;
        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            ALOGE("flatten: not enough room for cache entries");
            return -EINVAL;
        }

        EntryHeader* eheader = reinterpret_cast<EntryHeader*>(&byteBuffer[byteOffset]);
        eheader->mKeySize = keySize;
        eheader->mValueSize = valueSize;

        memcpy(eheader->mData, keyBlob->getData(), keySize);
        memcpy(eheader->mData + keySize, valueBlob->getData(), valueSize);

        if (totalSize > entrySize) {
            // We have padding bytes. Those will get written to storage, and contribute to the CRC,
            // so make sure we zero them to have reproducible results.
            memset(eheader->mData + keySize + valueSize, 0, totalSize - entrySize);
        }

        byteOffset += totalSize;
    }

    return 0;
}

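// unflatten() rebuilds the cache from a buffer previously produced by
// flatten(). Corrupt or truncated data returns -EINVAL, while a version or
// build-id mismatch is treated as an empty (but valid) cache and returns 0.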
int BlobCache::unflatten(void const* buffer, size_t size) {
    // All errors should result in the BlobCache being in an empty state.
    mCacheEntries.clear();

    // Read the cache header
    if (size < sizeof(Header)) {
        ALOGE("unflatten: not enough room for cache header");
        return -EINVAL;
    }
    const Header* header = reinterpret_cast<const Header*>(buffer);
    if (header->mMagicNumber != blobCacheMagic) {
        ALOGE("unflatten: bad magic number: %" PRIu32, header->mMagicNumber);
        return -EINVAL;
    }
    auto buildId = base::GetProperty("ro.build.id", "");
    if (header->mBlobCacheVersion != blobCacheVersion ||
        header->mDeviceVersion != blobCacheDeviceVersion ||
        buildId.size() != header->mBuildIdLength ||
        strncmp(buildId.c_str(), header->mBuildId, buildId.size())) {
        // We treat version mismatches as an empty cache.
        return 0;
    }

    // Read cache entries
    const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer);
    off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength);
    size_t numEntries = header->mNumEntries;
    for (size_t i = 0; i < numEntries; i++) {
        if (byteOffset + sizeof(EntryHeader) > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const EntryHeader* eheader = reinterpret_cast<const EntryHeader*>(&byteBuffer[byteOffset]);
        size_t keySize = eheader->mKeySize;
        size_t valueSize = eheader->mValueSize;
        size_t entrySize = sizeof(EntryHeader) + keySize + valueSize;

        size_t totalSize = align4(entrySize);
        if (byteOffset + totalSize > size) {
            mCacheEntries.clear();
            ALOGE("unflatten: not enough room for cache entry headers");
            return -EINVAL;
        }

        const uint8_t* data = eheader->mData;
        set(data, keySize, data + keySize, valueSize);

        byteOffset += totalSize;
    }

    return 0;
}

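// Returns a pseudo-random number used by clean() to pick eviction victims. On
// non-Windows builds this uses nrand48() with per-instance state (mRandState);
// on Windows it falls back to rand().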
long int BlobCache::blob_random() {
#ifdef _WIN32
    return rand();
#else
    return nrand48(mRandState);
#endif
}

void BlobCache::clean() {
    // Remove random cache entries until the total cache size drops below half
    // of the maximum total cache size.
    while (mTotalSize > mMaxTotalSize / 2) {
        size_t i = size_t(blob_random() % (mCacheEntries.size()));
        const CacheEntry& entry(mCacheEntries[i]);
        mTotalSize -= entry.getKey()->getSize() + entry.getValue()->getSize();
        mCacheEntries.erase(mCacheEntries.begin() + i);
    }
}

bool BlobCache::isCleanable() const {
    return mTotalSize > mMaxTotalSize / 2;
}

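// A Blob either owns a heap-allocated copy of the data (copyData == true, used
// for stored keys and values) or merely points at caller-owned memory
// (copyData == false, used for the transient lookup keys built in set() and
// get()).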
BlobCache::Blob::Blob(const void* data, size_t size, bool copyData)
      : mData(copyData ? malloc(size) : data), mSize(size), mOwnsData(copyData) {
    if (data != nullptr && copyData) {
        memcpy(const_cast<void*>(mData), data, size);
    }
}

BlobCache::Blob::~Blob() {
    if (mOwnsData) {
        free(const_cast<void*>(mData));
    }
}

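// Blobs are ordered by size first and then by memcmp() of their contents; this
// is the ordering that keeps mCacheEntries sorted for std::lower_bound().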
bool BlobCache::Blob::operator<(const Blob& rhs) const {
    if (mSize == rhs.mSize) {
        return memcmp(mData, rhs.mData, mSize) < 0;
    } else {
        return mSize < rhs.mSize;
    }
}

const void* BlobCache::Blob::getData() const {
    return mData;
}

size_t BlobCache::Blob::getSize() const {
    return mSize;
}

BlobCache::CacheEntry::CacheEntry() {}

BlobCache::CacheEntry::CacheEntry(const std::shared_ptr<Blob>& key,
                                  const std::shared_ptr<Blob>& value)
      : mKey(key), mValue(value) {}

BlobCache::CacheEntry::CacheEntry(const CacheEntry& ce) : mKey(ce.mKey), mValue(ce.mValue) {}

bool BlobCache::CacheEntry::operator<(const CacheEntry& rhs) const {
    return *mKey < *rhs.mKey;
}

const BlobCache::CacheEntry& BlobCache::CacheEntry::operator=(const CacheEntry& rhs) {
    mKey = rhs.mKey;
    mValue = rhs.mValue;
    return *this;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getKey() const {
    return mKey;
}

std::shared_ptr<BlobCache::Blob> BlobCache::CacheEntry::getValue() const {
    return mValue;
}

void BlobCache::CacheEntry::setValue(const std::shared_ptr<Blob>& value) {
    mValue = value;
}

} // namespace android