Track the number of threads in threadpools.
Added tests for the max total thread count.

Renamed getThreadPoolMaxThreadCount to getThreadPoolMaxTotalThreadCount,
which now takes user-joined threads and polling threads into account.
Added a test to check that the currently available threads update
properly and do not lock up.
Added a second test that simulates a deadlock and confirms that it occurs.
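
For reference, the accounting the renamed getter performs once the pool has
started (a restatement of the ProcessState.cpp change below, not new logic):

    // mCurrentThreads: threads that called joinThreadPool()/setupPolling()
    // mKernelStartedThreads: threads spawned via spawnPooledThread()
    // Extra user/polling threads raise the total above the kernel budget:
    size_t maxTotal = mCurrentThreads < mKernelStartedThreads
            ? mMaxThreads
            : mMaxThreads + mCurrentThreads - mKernelStartedThreads;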

Bug: 188834514
Test: binderLibTest
Change-Id: Ia99d95544a38596ec9fc316e623e523b64337bc7
diff --git a/libs/binder/Binder.cpp b/libs/binder/Binder.cpp
index 39befbe..b66e89e 100644
--- a/libs/binder/Binder.cpp
+++ b/libs/binder/Binder.cpp
@@ -539,7 +539,7 @@
         return UNEXPECTED_NULL;
     }
 
-    size_t binderThreadPoolMaxCount = ProcessState::self()->getThreadPoolMaxThreadCount();
+    size_t binderThreadPoolMaxCount = ProcessState::self()->getThreadPoolMaxTotalThreadCount();
     if (binderThreadPoolMaxCount <= 1) {
         ALOGE("%s: ProcessState thread pool max count is %zu. RPC is disabled for this service "
               "because RPC requires the service to support multithreading.",
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index d453ac7..d536219 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -638,7 +638,9 @@
 void IPCThreadState::joinThreadPool(bool isMain)
 {
     LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
-
+    pthread_mutex_lock(&mProcess->mThreadCountLock);
+    mProcess->mCurrentThreads++;
+    pthread_mutex_unlock(&mProcess->mThreadCountLock);
     mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
 
     mIsLooper = true;
@@ -666,6 +668,13 @@
     mOut.writeInt32(BC_EXIT_LOOPER);
     mIsLooper = false;
     talkWithDriver(false);
+    pthread_mutex_lock(&mProcess->mThreadCountLock);
+    LOG_ALWAYS_FATAL_IF(mProcess->mCurrentThreads == 0,
+                        "Threadpool thread count underflow: a thread cannot exit from an "
+                        "empty threadpool. This is a misconfiguration; increase the "
+                        "threadpool max thread count.\n");
+    mProcess->mCurrentThreads--;
+    pthread_mutex_unlock(&mProcess->mThreadCountLock);
 }
 
 status_t IPCThreadState::setupPolling(int* fd)
@@ -677,6 +686,9 @@
     mOut.writeInt32(BC_ENTER_LOOPER);
     flushCommands();
     *fd = mProcess->mDriverFD;
+    pthread_mutex_lock(&mProcess->mThreadCountLock);
+    mProcess->mCurrentThreads++;
+    pthread_mutex_unlock(&mProcess->mThreadCountLock);
     return 0;
 }
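
Review note: the increment/decrement pair in joinThreadPool() uses explicit
pthread_mutex_lock/unlock calls, matching the existing mThreadCountLock usage.
A hypothetical RAII guard (sketch only, not part of this change; it assumes
the same access to ProcessState internals that IPCThreadState has) would tie
the decrement to scope exit:

    // Hypothetical sketch, not in this patch.
    class ScopedThreadCount {
    public:
        explicit ScopedThreadCount(ProcessState* proc) : mProc(proc) {
            pthread_mutex_lock(&mProc->mThreadCountLock);
            mProc->mCurrentThreads++;
            pthread_mutex_unlock(&mProc->mThreadCountLock);
        }
        ~ScopedThreadCount() {
            pthread_mutex_lock(&mProc->mThreadCountLock);
            mProc->mCurrentThreads--;
            pthread_mutex_unlock(&mProc->mThreadCountLock);
        }
    private:
        ProcessState* mProc;
    };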
 
diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp
index c4cf3e6..90b59d0 100644
--- a/libs/binder/ProcessState.cpp
+++ b/libs/binder/ProcessState.cpp
@@ -182,7 +182,6 @@
             ALOGW("Extra binder thread started, but 0 threads requested. Do not use "
                   "*startThreadPool when zero threads are requested.");
         }
-
         mThreadPoolStarted = true;
         spawnPooledThread(true);
     }
@@ -386,6 +385,7 @@
         ALOGV("Spawning new pooled thread, name=%s\n", name.string());
         sp<Thread> t = sp<PoolThread>::make(isMain);
         t->run(name.string());
+        mKernelStartedThreads++;
     }
 }
 
@@ -402,12 +402,20 @@
     return result;
 }
 
-size_t ProcessState::getThreadPoolMaxThreadCount() const {
+size_t ProcessState::getThreadPoolMaxTotalThreadCount() const {
     // may actually be one more than this, if join is called
-    if (mThreadPoolStarted) return mMaxThreads;
+    if (mThreadPoolStarted) {
+        return mCurrentThreads < mKernelStartedThreads
+                ? mMaxThreads
+                : mMaxThreads + mCurrentThreads - mKernelStartedThreads;
+    }
-    // must not be initialized or maybe has poll thread setup, we
-    // currently don't track this in libbinder
-    return 0;
+    // Threadpool not started: no kernel-started threads can exist here, only
+    // user-joined or polling threads, which are tracked by mCurrentThreads.
+    LOG_ALWAYS_FATAL_IF(mKernelStartedThreads != 0,
+                        "Expecting 0 kernel started threads but have %zu",
+                        mKernelStartedThreads);
+    return mCurrentThreads;
 }
 
 #define DRIVER_FEATURES_PATH "/dev/binderfs/features/"
@@ -493,6 +501,8 @@
         mExecutingThreadsCount(0),
         mWaitingForThreads(0),
         mMaxThreads(DEFAULT_MAX_BINDER_THREADS),
+        mCurrentThreads(0),
+        mKernelStartedThreads(0),
         mStarvationStartTimeMs(0),
         mForked(false),
         mThreadPoolStarted(false),
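
Worked example of the new getThreadPoolMaxTotalThreadCount() with the values
the test below uses (illustrative, derived directly from the change above):

    // setThreadPoolMaxThreadCount(15), then startThreadPool():
    //   spawned, not yet joined: mKernelStartedThreads = 1, mCurrentThreads = 0
    //     -> 0 < 1, returns mMaxThreads = 15 (avoids underflow)
    //   after it joins:          mKernelStartedThreads = 1, mCurrentThreads = 1
    //     -> returns 15 + 1 - 1 = 15
    // A user thread then calls joinThreadPool():
    //   mCurrentThreads = 2 -> returns 15 + 2 - 1 = 16
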
diff --git a/libs/binder/include/binder/ProcessState.h b/libs/binder/include/binder/ProcessState.h
index 5820802..e17a76c 100644
--- a/libs/binder/include/binder/ProcessState.h
+++ b/libs/binder/include/binder/ProcessState.h
@@ -84,11 +84,11 @@
     void setCallRestriction(CallRestriction restriction);
 
     /**
-     * Get the max number of threads that the kernel can start.
-     *
-     * Note: this is the lower bound. Additional threads may be started.
+     * Get the maximum number of threads that can be in the thread pool.
+     * This includes kernel-started threads, user-joined threads, and
+     * polling threads if used.
      */
-    size_t getThreadPoolMaxThreadCount() const;
+    size_t getThreadPoolMaxTotalThreadCount() const;
 
     enum class DriverFeature {
         ONEWAY_SPAM_DETECTION,
@@ -133,8 +133,12 @@
     size_t mExecutingThreadsCount;
     // Number of threads calling IPCThreadState::blockUntilThreadAvailable()
     size_t mWaitingForThreads;
-    // Maximum number for binder threads allowed for this process.
+    // Maximum number of threads the kernel is allowed to start on demand in the threadpool.
     size_t mMaxThreads;
+    // Current number of threads inside the thread pool.
+    size_t mCurrentThreads;
+    // Current number of kernel-started threads inside the thread pool.
+    size_t mKernelStartedThreads;
     // Time when thread pool was emptied
     int64_t mStarvationStartTimeMs;
 
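Typical caller flow under the renamed API (a sketch; it mirrors what
run_server in the test below does, with an illustrative thread count):

    ProcessState::self()->setThreadPoolMaxThreadCount(15); // kernel budget
    ProcessState::self()->startThreadPool();  // spawns the first pooled thread
    // Optionally the caller's own thread joins the pool as well:
    // IPCThreadState::self()->joinThreadPool();
    size_t total = ProcessState::self()->getThreadPoolMaxTotalThreadCount();
    // total == 15, or 16 once the extra user thread has joined.
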
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index 57a1fda..a0f12cf 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -82,6 +82,7 @@
 static constexpr int kSchedPolicy = SCHED_RR;
 static constexpr int kSchedPriority = 7;
 static constexpr int kSchedPriorityMore = 8;
+static constexpr int kKernelThreads = 15;
 
 static String16 binderLibTestServiceName = String16("test.binderLib");
 
@@ -115,6 +116,12 @@
     BINDER_LIB_TEST_ECHO_VECTOR,
     BINDER_LIB_TEST_REJECT_OBJECTS,
     BINDER_LIB_TEST_CAN_GET_SID,
+    BINDER_LIB_TEST_GET_MAX_THREAD_COUNT,
+    BINDER_LIB_TEST_SET_MAX_THREAD_COUNT,
+    BINDER_LIB_TEST_LOCK_UNLOCK,
+    BINDER_LIB_TEST_PROCESS_LOCK,
+    BINDER_LIB_TEST_UNLOCK_AFTER_MS,
+    BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK
 };
 
 pid_t start_server_process(int arg2, bool usePoll = false)
@@ -1232,6 +1239,76 @@
     EXPECT_EQ(sm->unregisterForNotifications(String16("RogerRafa"), cb), OK);
 }
 
+TEST_F(BinderLibTest, ThreadPoolAvailableThreads) {
+    Parcel data, reply;
+    sp<IBinder> server = addServer();
+    ASSERT_TRUE(server != nullptr);
+    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
+                StatusEq(NO_ERROR));
+    int32_t replyi = reply.readInt32();
+    // Expect kKernelThreads (15) or, once the starting pool thread has also
+    // joined, kKernelThreads + 1 (16).
+    EXPECT_TRUE(replyi == kKernelThreads || replyi == kKernelThreads + 1);
+    EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_LOCK, data, &reply),
+                StatusEq(NO_ERROR));
+
+    /*
+     * This will use all threads in the pool except the main pool thread.
+     * The service should run fine without deadlocking, and the thread count
+     * should not exceed 16 (kKernelThreads max + the starting pool thread).
+     */
+    std::vector<std::thread> ts;
+    for (size_t i = 0; i < kKernelThreads - 1; i++) {
+        ts.push_back(std::thread([&] {
+            Parcel local_reply; // each thread needs its own reply Parcel
+            EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
+                        StatusEq(NO_ERROR));
+        }));
+    }
+
+    data.writeInt32(1);
+    // Sleep 1 ms before unlocking so all pending transactions get pool threads
+    EXPECT_THAT(server->transact(BINDER_LIB_TEST_UNLOCK_AFTER_MS, data, &reply),
+                StatusEq(NO_ERROR));
+
+    for (auto &t : ts) {
+        t.join();
+    }
+
+    EXPECT_THAT(server->transact(BINDER_LIB_TEST_GET_MAX_THREAD_COUNT, data, &reply),
+                StatusEq(NO_ERROR));
+    replyi = reply.readInt32();
+    // Exactly kKernelThreads + 1 threads should now exist: the kernel max plus the starting pool thread.
+    EXPECT_EQ(replyi, kKernelThreads + 1);
+}
+
+size_t epochMillis() {
+    using std::chrono::duration_cast;
+    using std::chrono::milliseconds;
+    using std::chrono::system_clock;
+    return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
+}
+
+TEST_F(BinderLibTest, HangingServices) {
+    Parcel data, reply;
+    sp<IBinder> server = addServer();
+    ASSERT_TRUE(server != nullptr);
+    int32_t delay = 1000; // ms
+    data.writeInt32(delay);
+    EXPECT_THAT(server->transact(BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK, data, &reply),
+                StatusEq(NO_ERROR));
+    std::vector<std::thread> ts;
+    size_t epochMsBefore = epochMillis();
+    for (size_t i = 0; i < kKernelThreads + 1; i++) {
+        ts.push_back(std::thread([&] {
+            Parcel local_reply; // each thread needs its own reply Parcel
+            EXPECT_THAT(server->transact(BINDER_LIB_TEST_LOCK_UNLOCK, data, &local_reply),
+                        StatusEq(NO_ERROR));
+        }));
+    }
+
+    for (auto &t : ts) {
+        t.join();
+    }
+    size_t epochMsAfter = epochMillis();
+
+    // The simulated deadlock exhausted the pool, so the transactions only finished after the 1s unlock.
+    EXPECT_GE(epochMsAfter, epochMsBefore + delay);
+}
+
 class BinderLibRpcTestBase : public BinderLibTest {
 public:
     void SetUp() override {
@@ -1638,11 +1715,41 @@
             case BINDER_LIB_TEST_CAN_GET_SID: {
                 return IPCThreadState::self()->getCallingSid() == nullptr ? BAD_VALUE : NO_ERROR;
             }
+            case BINDER_LIB_TEST_GET_MAX_THREAD_COUNT: {
+                reply->writeInt32(ProcessState::self()->getThreadPoolMaxTotalThreadCount());
+                return NO_ERROR;
+            }
+            case BINDER_LIB_TEST_PROCESS_LOCK: {
+                blockMutex.lock();
+                return NO_ERROR;
+            }
+            case BINDER_LIB_TEST_LOCK_UNLOCK: {
+                std::lock_guard<std::mutex> _l(blockMutex);
+                return NO_ERROR;
+            }
+            case BINDER_LIB_TEST_UNLOCK_AFTER_MS: {
+                int32_t ms = data.readInt32();
+                return unlockInMs(ms);
+            }
+            case BINDER_LIB_TEST_PROCESS_TEMPORARY_LOCK: {
+                blockMutex.lock();
+                // Read the delay now: the Parcel must not be used from the
+                // detached thread after this transaction has returned.
+                int32_t ms = data.readInt32();
+                std::thread t([this, ms] { unlockInMs(ms); });
+                t.detach();
+                return NO_ERROR;
+            }
             default:
                 return UNKNOWN_TRANSACTION;
         };
     }
 
+    // Blocks the calling thread for ms milliseconds, then releases blockMutex.
+    status_t unlockInMs(int32_t ms) {
+        usleep(ms * 1000);
+        blockMutex.unlock();
+        return NO_ERROR;
+    }
+
 private:
     int32_t m_id;
     int32_t m_nextServerId;
@@ -1653,6 +1760,7 @@
     sp<IBinder> m_strongRef;
     sp<IBinder> m_callback;
     bool m_exitOnDestroy;
+    std::mutex blockMutex;
 };
 
 int run_server(int index, int readypipefd, bool usePoll)
@@ -1754,6 +1862,7 @@
              }
         }
     } else {
+        ProcessState::self()->setThreadPoolMaxThreadCount(kKernelThreads);
         ProcessState::self()->startThreadPool();
         IPCThreadState::self()->joinThreadPool();
     }