Collect Parcel statistics using std::atomics.

Instead of taking a heavy pthread_mutex_t to update the size and count
statistics, this change shifts to faster std::atomic values, an
approach that was already pioneered in hardware::Parcel.
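
For illustration, a minimal sketch of the general pattern (hypothetical
names, not the actual Parcel code):

    #include <atomic>
    #include <cstddef>

    // Hypothetical counters: each update is a single lock-free RMW
    // instead of a lock/modify/unlock round trip on a global mutex.
    static std::atomic<size_t> gAllocCount{0};
    static std::atomic<size_t> gAllocSize{0};

    static void noteAlloc(size_t bytes) {
        gAllocSize += bytes;  // atomic fetch_add
        gAllocCount++;
    }

    static void noteFree(size_t bytes) {
        gAllocSize -= bytes;  // atomic fetch_sub
        gAllocCount--;
    }

    static size_t allocSize() { return gAllocSize.load(); }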

The Parcel benchmarks referenced below show a ~10% performance
improvement in contended cases. Note that this is in addition to the
recent improvements from the shift to a linked-list pooling design and
the removal of advisory native allocation updates.

Here are the combined improvements of all three changes together:
~2x throughput for the 1-thread case, and ~9x throughput for the
16-thread case.

            1 thread   4 threads   16 threads
Combined     50.48%     15.58%      11.41%
(benchmark time relative to baseline; lower is better)
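
(For the throughput numbers above: 1/0.5048 ≈ 2.0x at 1 thread,
1/0.1558 ≈ 6.4x at 4 threads, and 1/0.1141 ≈ 8.8x at 16 threads.)
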
Bug: 165032569
Test: ./frameworks/base/libs/hwui/tests/scripts/prep_generic.sh little && atest CorePerfTests:android.os.ParcelObtainPerfTest
Change-Id: I436e70cdfd06e747d5c8fcc0ddd6ecf92737cf9c
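
A note on the "restart from ... capacity" hunk below: the old code
adjusted the global size under the mutex in two steps (+= desired,
-= mDataCapacity). With atomics, two back-to-back updates would still
converge to the right value, but a concurrent load() between them could
briefly observe an inflated size; computing the difference up front and
applying it as one atomic add or subtract avoids that window. A minimal
sketch of the idea (hypothetical names, not the actual Parcel code):

    #include <atomic>
    #include <cstddef>

    static std::atomic<size_t> gSize{0};  // hypothetical global stat

    // Apply a capacity change as a single atomic RMW, so a concurrent
    // gSize.load() never observes a half-applied update.
    static void noteResize(size_t oldCap, size_t newCap) {
        if (oldCap > newCap) {
            gSize -= (oldCap - newCap);  // single fetch_sub
        } else {
            gSize += (newCap - oldCap);  // single fetch_add
        }
    }
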
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index 8fd59ba..198c275 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -78,9 +78,8 @@
// many things compile this into prebuilts on the stack
static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
-static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
-static size_t gParcelGlobalAllocSize = 0;
-static size_t gParcelGlobalAllocCount = 0;
+static std::atomic<size_t> gParcelGlobalAllocCount;
+static std::atomic<size_t> gParcelGlobalAllocSize;
static size_t gMaxFds = 0;
@@ -276,17 +275,11 @@
}
size_t Parcel::getGlobalAllocSize() {
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
- size_t size = gParcelGlobalAllocSize;
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
- return size;
+ return gParcelGlobalAllocSize.load();
}
size_t Parcel::getGlobalAllocCount() {
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
- size_t count = gParcelGlobalAllocCount;
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
- return count;
+ return gParcelGlobalAllocCount.load();
}
const uint8_t* Parcel::data() const
@@ -2626,16 +2619,8 @@
releaseObjects();
if (mData) {
LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
- if (mDataCapacity <= gParcelGlobalAllocSize) {
- gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
- } else {
- gParcelGlobalAllocSize = 0;
- }
- if (gParcelGlobalAllocCount > 0) {
- gParcelGlobalAllocCount--;
- }
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
+ gParcelGlobalAllocSize -= mDataCapacity;
+ gParcelGlobalAllocCount--;
free(mData);
}
if (mObjects) free(mObjects);
@@ -2681,13 +2666,15 @@
if (data || desired == 0) {
LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
- gParcelGlobalAllocSize += desired;
- gParcelGlobalAllocSize -= mDataCapacity;
+ if (mDataCapacity > desired) {
+ gParcelGlobalAllocSize -= (mDataCapacity - desired);
+ } else {
+ gParcelGlobalAllocSize += (desired - mDataCapacity);
+ }
+
if (!mData) {
gParcelGlobalAllocCount++;
}
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataCapacity = desired;
}
@@ -2775,10 +2762,8 @@
mOwner = nullptr;
LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mObjects = objects;
@@ -2826,10 +2811,8 @@
if (data) {
LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
desired);
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocSize -= mDataCapacity;
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataCapacity = desired;
} else {
@@ -2861,10 +2844,8 @@
}
LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
- pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
gParcelGlobalAllocCount++;
- pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
mData = data;
mDataSize = mDataPos = 0;