binder: remove mutex from transaction processing loop
Atomic ops are enough for the various count fields; we don't need a
completely consistent view of them.
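
For reference, the pattern this moves to looks roughly like the sketch
below. It is illustrative only -- the ThreadCounts type and names are made
up, not libbinder code: the mutex exists solely to pair the condition
variable wait with its predicate, while the counts themselves are plain
atomics that may be observed slightly out of sync with each other.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Illustrative sketch only -- not libbinder code.
    struct ThreadCounts {
        std::atomic_size_t executing{0};
        std::atomic_size_t maxThreads{4};
        std::atomic_int64_t waiting{0};
        std::mutex lock;                    // pairs the condvar with its predicate
        std::condition_variable available;

        void blockUntilAvailable() {
            std::unique_lock ul(lock);
            waiting++;
            // Predicate re-checked under the lock; the loads themselves are atomic.
            available.wait(ul, [&] { return executing < maxThreads; });
            waiting--;
        }

        void runOne() {
            executing.fetch_add(1);
            // ... execute command ...
            executing.fetch_sub(1);
            // Only pay for lock + notify when a thread is actually waiting.
            if (waiting > 0) {
                std::lock_guard lg(lock);
                available.notify_all();
            }
        }
    };

The waiter bumps waiting and checks the predicate with the lock held, and
the notifier takes the lock before notify_all(), so a wake-up cannot be
lost even though the counters are updated outside the lock.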
Bug: 333946800
Test: atest --test-mapping frameworks/native/libs/binder/TEST_MAPPING
Change-Id: I01f55a36f7421e4955e2b0d3a6500b183573d765
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index ef96f80..ad20cfa 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -613,16 +613,20 @@
void IPCThreadState::blockUntilThreadAvailable()
{
- pthread_mutex_lock(&mProcess->mThreadCountLock);
- mProcess->mWaitingForThreads++;
- while (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads) {
- ALOGW("Waiting for thread to be free. mExecutingThreadsCount=%lu mMaxThreads=%lu\n",
- static_cast<unsigned long>(mProcess->mExecutingThreadsCount),
- static_cast<unsigned long>(mProcess->mMaxThreads));
- pthread_cond_wait(&mProcess->mThreadCountDecrement, &mProcess->mThreadCountLock);
- }
- mProcess->mWaitingForThreads--;
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
+ std::unique_lock lock_guard_(mProcess->mOnThreadAvailableLock);
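+ // The lock only pairs the condvar wait with its predicate; the counts themselves stay atomic.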
+ mProcess->mOnThreadAvailableWaiting++;
+ mProcess->mOnThreadAvailableCondVar.wait(lock_guard_, [&] {
+ size_t max = mProcess->mMaxThreads;
+ size_t cur = mProcess->mExecutingThreadsCount;
+ if (cur < max) {
+ return true;
+ }
+ ALOGW("Waiting for thread to be free. mExecutingThreadsCount=%" PRId64
+ " mMaxThreads=%" PRId64 "\n",
+ cur, max);
+ return false;
+ });
+ mProcess->mOnThreadAvailableWaiting--;
}
status_t IPCThreadState::getAndExecuteCommand()
@@ -642,34 +646,33 @@
ALOGI("%s", message.c_str());
}
- pthread_mutex_lock(&mProcess->mThreadCountLock);
- mProcess->mExecutingThreadsCount++;
- if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
- mProcess->mStarvationStartTimeMs == 0) {
- mProcess->mStarvationStartTimeMs = uptimeMillis();
+ size_t newThreadsCount = mProcess->mExecutingThreadsCount.fetch_add(1) + 1;
+ if (newThreadsCount >= mProcess->mMaxThreads) {
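+ // Only record a starvation start if one isn't already recorded; the CAS
+ // keeps a racing thread from overwriting an earlier timestamp.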
+ int64_t expected = 0;
+ mProcess->mStarvationStartTimeMs.compare_exchange_strong(expected, uptimeMillis());
}
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
result = executeCommand(cmd);
- pthread_mutex_lock(&mProcess->mThreadCountLock);
- mProcess->mExecutingThreadsCount--;
- if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
- mProcess->mStarvationStartTimeMs != 0) {
- int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
- if (starvationTimeMs > 100) {
- ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
- mProcess->mMaxThreads, starvationTimeMs);
+ size_t maxThreads = mProcess->mMaxThreads;
+ newThreadsCount = mProcess->mExecutingThreadsCount.fetch_sub(1) - 1;
+ if (newThreadsCount < maxThreads) {
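+ // exchange(0) reads and clears the timestamp in one step, so only one
+ // exiting thread logs a given starvation window.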
+ int64_t starvationStartTimeMs = mProcess->mStarvationStartTimeMs.exchange(0);
+ if (starvationStartTimeMs != 0) {
+ int64_t starvationTimeMs = uptimeMillis() - starvationStartTimeMs;
+ if (starvationTimeMs > 100) {
+ ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms", maxThreads,
+ starvationTimeMs);
+ }
}
- mProcess->mStarvationStartTimeMs = 0;
}
// Cond broadcast can be expensive, so don't send it every time a binder
// call is processed. b/168806193
- if (mProcess->mWaitingForThreads > 0) {
- pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
+ if (mProcess->mOnThreadAvailableWaiting > 0) {
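+ // Take the lock before notifying so the wake-up can't slip in between a
+ // waiter's predicate check and its wait.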
+ std::lock_guard lock_guard_(mProcess->mOnThreadAvailableLock);
+ mProcess->mOnThreadAvailableCondVar.notify_all();
}
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
return result;
@@ -727,10 +730,9 @@
void IPCThreadState::joinThreadPool(bool isMain)
{
- LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
- pthread_mutex_lock(&mProcess->mThreadCountLock);
+ LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(),
+ getpid());
mProcess->mCurrentThreads++;
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
mIsLooper = true;
@@ -758,13 +760,11 @@
mOut.writeInt32(BC_EXIT_LOOPER);
mIsLooper = false;
talkWithDriver(false);
- pthread_mutex_lock(&mProcess->mThreadCountLock);
- LOG_ALWAYS_FATAL_IF(mProcess->mCurrentThreads == 0,
- "Threadpool thread count = 0. Thread cannot exist and exit in empty "
- "threadpool\n"
+ size_t oldCount = mProcess->mCurrentThreads.fetch_sub(1);
+ LOG_ALWAYS_FATAL_IF(oldCount == 0,
+ "Threadpool thread count underflowed. Thread cannot exist and exit in "
+ "empty threadpool\n"
"Misconfiguration. Increase threadpool max threads configuration\n");
- mProcess->mCurrentThreads--;
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
status_t IPCThreadState::setupPolling(int* fd)
@@ -776,9 +776,7 @@
mOut.writeInt32(BC_ENTER_LOOPER);
flushCommands();
*fd = mProcess->mDriverFD;
- pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mCurrentThreads++;
- pthread_mutex_unlock(&mProcess->mThreadCountLock);
return 0;
}
diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp
index fb2781b..4f7cbad 100644
--- a/libs/binder/ProcessState.cpp
+++ b/libs/binder/ProcessState.cpp
@@ -407,9 +407,7 @@
ALOGV("Spawning new pooled thread, name=%s\n", name.c_str());
sp<Thread> t = sp<PoolThread>::make(isMain);
t->run(name.c_str());
- pthread_mutex_lock(&mThreadCountLock);
mKernelStartedThreads++;
- pthread_mutex_unlock(&mThreadCountLock);
}
// TODO: if startThreadPool is called on another thread after the process
// starts up, the kernel might think that it already requested those
@@ -432,19 +430,19 @@
}
size_t ProcessState::getThreadPoolMaxTotalThreadCount() const {
- pthread_mutex_lock(&mThreadCountLock);
- auto detachGuard = make_scope_guard([&]() { pthread_mutex_unlock(&mThreadCountLock); });
-
if (mThreadPoolStarted) {
- LOG_ALWAYS_FATAL_IF(mKernelStartedThreads > mMaxThreads + 1,
- "too many kernel-started threads: %zu > %zu + 1", mKernelStartedThreads,
- mMaxThreads);
+ size_t kernelStarted = mKernelStartedThreads;
+ size_t max = mMaxThreads;
+ size_t current = mCurrentThreads;
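+ // Independent atomic loads; the three values need not form a perfectly
+ // consistent snapshot here.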
+
+ LOG_ALWAYS_FATAL_IF(kernelStarted > max + 1,
+ "too many kernel-started threads: %zu > %zu + 1", kernelStarted, max);
// calling startThreadPool starts a thread
size_t threads = 1;
// the kernel is configured to start up to mMaxThreads more threads
- threads += mMaxThreads;
+ threads += max;
// Users may call IPCThreadState::joinThreadPool directly. We don't
// currently have a way to count this directly (it could be added by
@@ -454,8 +452,8 @@
// in IPCThreadState, temporarily forget about the extra join threads.
// This is okay, because most callers of this method only care about
// having 0, 1, or more threads.
- if (mCurrentThreads > mKernelStartedThreads) {
- threads += mCurrentThreads - mKernelStartedThreads;
+ if (current > kernelStarted) {
+ threads += current - kernelStarted;
}
return threads;
@@ -463,10 +461,9 @@
// must not be initialized or maybe has poll thread setup, we
// currently don't track this in libbinder
- LOG_ALWAYS_FATAL_IF(mKernelStartedThreads != 0,
- "Expecting 0 kernel started threads but have"
- " %zu",
- mKernelStartedThreads);
+ size_t kernelStarted = mKernelStartedThreads;
+ LOG_ALWAYS_FATAL_IF(kernelStarted != 0, "Expecting 0 kernel started threads but have %zu",
+ kernelStarted);
return mCurrentThreads;
}
@@ -553,10 +550,7 @@
: mDriverName(String8(driver)),
mDriverFD(-1),
mVMStart(MAP_FAILED),
- mThreadCountLock(PTHREAD_MUTEX_INITIALIZER),
- mThreadCountDecrement(PTHREAD_COND_INITIALIZER),
mExecutingThreadsCount(0),
- mWaitingForThreads(0),
mMaxThreads(DEFAULT_MAX_BINDER_THREADS),
mCurrentThreads(0),
mKernelStartedThreads(0),
diff --git a/libs/binder/include/binder/ProcessState.h b/libs/binder/include/binder/ProcessState.h
index a466638..11898a0 100644
--- a/libs/binder/include/binder/ProcessState.h
+++ b/libs/binder/include/binder/ProcessState.h
@@ -23,6 +23,7 @@
#include <pthread.h>
+#include <atomic>
+#include <condition_variable>
#include <mutex>
// ---------------------------------------------------------------------------
@@ -162,22 +163,21 @@
int mDriverFD;
void* mVMStart;
- // Protects thread count and wait variables below.
- mutable pthread_mutex_t mThreadCountLock;
- // Broadcast whenever mWaitingForThreads > 0
- pthread_cond_t mThreadCountDecrement;
+ mutable std::mutex mOnThreadAvailableLock;
+ std::condition_variable mOnThreadAvailableCondVar;
+ // Number of threads waiting on `mOnThreadAvailableCondVar`.
+ std::atomic_int64_t mOnThreadAvailableWaiting = 0;
+
// Number of binder threads currently executing a command.
- size_t mExecutingThreadsCount;
- // Number of threads calling IPCThreadState::blockUntilThreadAvailable()
- size_t mWaitingForThreads;
+ std::atomic_size_t mExecutingThreadsCount;
// Maximum number of lazy threads to be started in the threadpool by the kernel.
- size_t mMaxThreads;
+ std::atomic_size_t mMaxThreads;
// Current number of threads inside the thread pool.
- size_t mCurrentThreads;
+ std::atomic_size_t mCurrentThreads;
// Current number of pooled threads inside the thread pool.
- size_t mKernelStartedThreads;
+ std::atomic_size_t mKernelStartedThreads;
// Time when thread pool was emptied
- int64_t mStarvationStartTimeMs;
+ std::atomic_int64_t mStarvationStartTimeMs;
mutable std::mutex mLock; // protects everything below.