Add support for caching small reads of process memory.

Add benchmarking to verify that the cached reads are faster than
uncached reads.

Test: Ran unit tests.
Change-Id: I1487114331f4581ec2368e56c4f18c6e3e6bcc7d
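
For context, a minimal sketch of how a caller might use the new cached
interface (the matching declaration in the Memory.h header is part of the
change but not shown in this diff). The wrapper function below and its
parameters are illustrative, not part of the patch:

  #include <stdint.h>
  #include <sys/types.h>

  #include <memory>

  #include <unwindstack/Memory.h>

  // Illustrative only: read one 64-bit value from another process through
  // the cached interface added below. Assumes the caller supplies a valid
  // pid and a readable address.
  bool ReadWordCached(pid_t pid, uint64_t addr, uint64_t* value) {
    std::shared_ptr<unwindstack::Memory> memory =
        unwindstack::Memory::CreateProcessMemoryCached(pid);
    return memory->ReadFully(addr, value, sizeof(*value));
  }
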
diff --git a/libunwindstack/Memory.cpp b/libunwindstack/Memory.cpp
index cfa8c6d..a30d65e 100644
--- a/libunwindstack/Memory.cpp
+++ b/libunwindstack/Memory.cpp
@@ -174,6 +174,13 @@
   return std::shared_ptr<Memory>(new MemoryRemote(pid));
 }
 
+std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
+  if (pid == getpid()) {
+    return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
+  }
+  return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
+}
+
 size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
   if (addr >= raw_.size()) {
     return 0;
@@ -398,4 +405,50 @@
   return 0;
 }
 
+size_t MemoryCache::Read(uint64_t addr, void* dst, size_t size) {
+  // For now, only use the cache for small reads (at most 64 bytes).
+  if (size > 64) {
+    return impl_->Read(addr, dst, size);
+  }
+
+  uint64_t addr_page = addr >> kCacheBits;
+  auto entry = cache_.find(addr_page);
+  uint8_t* cache_dst;
+  if (entry != cache_.end()) {
+    cache_dst = entry->second;
+  } else {
+    cache_dst = cache_[addr_page];
+    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
+      // The page could not be read, so remove the entry that was just created.
+      cache_.erase(addr_page);
+      return impl_->Read(addr, dst, size);
+    }
+  }
+  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
+  if (size <= max_read) {
+    memcpy(dst, &cache_dst[addr & kCacheMask], size);
+    return size;
+  }
+
+  // The read crosses into the next cached page. Since a small read can span
+  // at most one extra page, duplicate the code rather than looping.
+  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
+  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
+  addr_page++;
+
+  entry = cache_.find(addr_page);
+  if (entry != cache_.end()) {
+    cache_dst = entry->second;
+  } else {
+    cache_dst = cache_[addr_page];
+    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
+      // The page could not be read, so remove the entry that was just created.
+      cache_.erase(addr_page);
+      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
+    }
+  }
+  memcpy(dst, cache_dst, size - max_read);
+  return size;
+}
+
 }  // namespace unwindstack
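
The hunks above only touch Memory.cpp; the MemoryCache class itself is
declared in the header. Below is a rough sketch of the declaration the
implementation relies on. Member and constant names are taken from the code
above, but the cache-line size (kCacheBits = 12, i.e. 4096-byte pages) is an
assumption, not something shown in this diff:

  #include <stdint.h>

  #include <memory>
  #include <unordered_map>

  // Sketch only; would live next to the other Memory classes inside
  // namespace unwindstack in Memory.h.
  class MemoryCache : public Memory {
   public:
    MemoryCache(Memory* memory) : impl_(memory) {}
    virtual ~MemoryCache() = default;

    size_t Read(uint64_t addr, void* dst, size_t size) override;

   protected:
    // kCacheBits = 12 is assumed (4096-byte cache lines).
    constexpr static size_t kCacheBits = 12;
    constexpr static size_t kCacheMask = (1 << kCacheBits) - 1;
    constexpr static size_t kCacheSize = 1 << kCacheBits;

    // Maps addr >> kCacheBits to a cached copy of that page. Using a plain
    // array as the mapped type lets entry->second decay to uint8_t* in Read().
    std::unordered_map<uint64_t, uint8_t[kCacheSize]> cache_;

    std::unique_ptr<Memory> impl_;
  };

With these constants, a 16-byte read at address 0x1ff8 falls in page 1 with
max_read = 0x2000 - 0x1ff8 = 8, so the first 8 bytes come from the page cached
under key 1 and the remaining 8 from the page cached under key 2, which is the
duplicated second half of MemoryCache::Read.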