Add linkToDeath support for RPC binder so a client can act on disconnect

For the client to be notified, its RpcSession must have at least one
incoming thread listening on the connection (see
RpcSession::setMaxIncomingThreads); linkToDeath() aborts if the session
was configured without any.
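
A minimal client-side sketch of the new behavior (the socket path and
the recipient class here are illustrative only, not part of this
change):

    auto session = RpcSession::make();
    // Reserve an incoming thread so the disconnect notification can be
    // delivered back to this client.
    session->setMaxIncomingThreads(1);
    if (session->setupUnixDomainClient("/some/socket/path") != OK) return;

    class Recipient : public IBinder::DeathRecipient {
        void binderDied(const wp<IBinder>& /* who */) override {
            ALOGI("remote RPC binder died");
        }
    };
    auto recipient = sp<Recipient>::make();
    sp<IBinder> binder = session->getRootObject();
    // Aborts unless the session was set up with incoming threads.
    binder->linkToDeath(recipient, /*cookie=*/nullptr, /*flags=*/0);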

Test: atest binderRpcTest
Test: atest binderRpcTestSingleThreaded
Bug: 182939380
Change-Id: I3746de6e8cff99bb267867c5dc60a6815b19fb92
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index 82ebdd7..b6d35ef 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -343,11 +343,22 @@
 status_t BpBinder::linkToDeath(
     const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
 {
-    if (isRpcBinder()) return UNKNOWN_TRANSACTION;
-
-    if constexpr (!kEnableKernelIpc) {
+    if (isRpcBinder()) {
+        if (rpcSession()->getMaxIncomingThreads() < 1) {
+            LOG_ALWAYS_FATAL("Cannot register a DeathRecipient without any incoming connections.");
+            return INVALID_OPERATION;
+        }
+    } else if constexpr (!kEnableKernelIpc) {
         LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
         return INVALID_OPERATION;
+    } else {
+        if (ProcessState::self()->getThreadPoolMaxTotalThreadCount() == 0) {
+            ALOGW("Linking to death on %s but there are no threads (yet?) listening to incoming "
+                  "transactions. See ProcessState::startThreadPool and "
+                  "ProcessState::setThreadPoolMaxThreadCount. Generally you should set up the "
+                  "binder threadpool before other initialization steps.",
+                  String8(getInterfaceDescriptor()).c_str());
+        }
     }
 
     Obituary ob;
@@ -358,14 +370,6 @@
     LOG_ALWAYS_FATAL_IF(recipient == nullptr,
                         "linkToDeath(): recipient must be non-NULL");
 
-    if (ProcessState::self()->getThreadPoolMaxTotalThreadCount() == 0) {
-        ALOGW("Linking to death on %s but there are no threads (yet?) listening to incoming "
-              "transactions. See ProcessState::startThreadPool and "
-              "ProcessState::setThreadPoolMaxThreadCount. Generally you should setup the binder "
-              "threadpool before other initialization steps.",
-              String8(getInterfaceDescriptor()).c_str());
-    }
-
     {
         AutoMutex _l(mLock);
 
@@ -376,10 +380,14 @@
                     return NO_MEMORY;
                 }
                 ALOGV("Requesting death notification: %p handle %d\n", this, binderHandle());
-                getWeakRefs()->incWeak(this);
-                IPCThreadState* self = IPCThreadState::self();
-                self->requestDeathNotification(binderHandle(), this);
-                self->flushCommands();
+                if (!isRpcBinder()) {
+                    if constexpr (kEnableKernelIpc) {
+                        getWeakRefs()->incWeak(this);
+                        IPCThreadState* self = IPCThreadState::self();
+                        self->requestDeathNotification(binderHandle(), this);
+                        self->flushCommands();
+                    }
+                }
             }
             ssize_t res = mObituaries->add(ob);
             return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
@@ -394,9 +402,7 @@
     const wp<DeathRecipient>& recipient, void* cookie, uint32_t flags,
     wp<DeathRecipient>* outRecipient)
 {
-    if (isRpcBinder()) return UNKNOWN_TRANSACTION;
-
-    if constexpr (!kEnableKernelIpc) {
+    if (!kEnableKernelIpc && !isRpcBinder()) {
         LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
         return INVALID_OPERATION;
     }
@@ -419,9 +425,13 @@
             mObituaries->removeAt(i);
             if (mObituaries->size() == 0) {
                 ALOGV("Clearing death notification: %p handle %d\n", this, binderHandle());
-                IPCThreadState* self = IPCThreadState::self();
-                self->clearDeathNotification(binderHandle(), this);
-                self->flushCommands();
+                if (!isRpcBinder()) {
+                    if constexpr (kEnableKernelIpc) {
+                        IPCThreadState* self = IPCThreadState::self();
+                        self->clearDeathNotification(binderHandle(), this);
+                        self->flushCommands();
+                    }
+                }
                 delete mObituaries;
                 mObituaries = nullptr;
             }
@@ -434,9 +444,7 @@
 
 void BpBinder::sendObituary()
 {
-    LOG_ALWAYS_FATAL_IF(isRpcBinder(), "Cannot send obituary for remote binder.");
-
-    if constexpr (!kEnableKernelIpc) {
+    if (!kEnableKernelIpc && !isRpcBinder()) {
         LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
         return;
     }
@@ -451,9 +459,13 @@
     Vector<Obituary>* obits = mObituaries;
     if(obits != nullptr) {
         ALOGV("Clearing sent death notification: %p handle %d\n", this, binderHandle());
-        IPCThreadState* self = IPCThreadState::self();
-        self->clearDeathNotification(binderHandle(), this);
-        self->flushCommands();
+        if (!isRpcBinder()) {
+            if constexpr (kEnableKernelIpc) {
+                IPCThreadState* self = IPCThreadState::self();
+                self->clearDeathNotification(binderHandle(), this);
+                self->flushCommands();
+            }
+        }
         mObituaries = nullptr;
     }
     mObitsSent = 1;
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
index 80f6a37..e6dbd79 100644
--- a/libs/binder/RpcSession.cpp
+++ b/libs/binder/RpcSession.cpp
@@ -223,6 +223,11 @@
 
     _l.unlock();
 
+    if (status_t res = state()->sendObituaries(sp<RpcSession>::fromExisting(this)); res != OK) {
+        ALOGE("Failed to send obituaries while shutting down the RpcSession: %s",
+              statusToString(res).c_str());
+    }
+
     mRpcBinderState->clear();
 
     return true;
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 28730ff..e3ff64d 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -226,6 +226,30 @@
     return OK;
 }
 
+status_t RpcState::sendObituaries(const sp<RpcSession>& session) {
+    RpcMutexUniqueLock _l(mNodeMutex);
+
+    // Gather strong pointers to all of the remote binders for this session,
+    // since remoteBinder() only returns a raw pointer. Send the obituaries
+    // and drop the strong pointers outside of the lock so that neither the
+    // destructors nor the onBinderDied callbacks run while the lock is held.
+    std::vector<sp<IBinder>> remoteBinders;
+    for (const auto& [_, binderNode] : mNodeForAddress) {
+        if (auto binder = binderNode.binder.promote()) {
+            remoteBinders.push_back(std::move(binder));
+        }
+    }
+    _l.unlock();
+
+    for (const auto& binder : remoteBinders) {
+        if (binder->remoteBinder() &&
+            binder->remoteBinder()->getPrivateAccessor().rpcSession() == session) {
+            binder->remoteBinder()->sendObituary();
+        }
+    }
+    return OK;
+}
+
 size_t RpcState::countBinders() {
     RpcMutexLockGuard _l(mNodeMutex);
     return mNodeForAddress.size();
diff --git a/libs/binder/RpcState.h b/libs/binder/RpcState.h
index 892ecd6..7aab5ee 100644
--- a/libs/binder/RpcState.h
+++ b/libs/binder/RpcState.h
@@ -140,6 +140,11 @@
      */
     [[nodiscard]] status_t flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                                  const sp<IBinder>& binder);
+    /**
+     * Called when the RpcSession is shut down.
+     * Sends obituaries for every remote binder known to this session.
+     */
+    [[nodiscard]] status_t sendObituaries(const sp<RpcSession>& session);
 
     size_t countBinders();
     void dump();
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index fca3c29..f6ab667 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -51,7 +51,7 @@
     Parcel p;
     p.writeInt32(3);
 
-    EXPECT_DEATH(p.markForBinder(sp<BBinder>::make()), "");
+    EXPECT_DEATH(p.markForBinder(sp<BBinder>::make()), "format must be set before data is written");
 }
 
 class BinderRpcServerOnly : public ::testing::TestWithParam<std::tuple<RpcSecurity, uint32_t>> {
@@ -1042,6 +1042,125 @@
     }
 }
 
+TEST_P(BinderRpc, SingleDeathRecipient) {
+    if (singleThreaded() || !kEnableRpcThreads) {
+        GTEST_SKIP() << "This test requires multiple threads";
+    }
+    class MyDeathRec : public IBinder::DeathRecipient {
+    public:
+        void binderDied(const wp<IBinder>& /* who */) override {
+            // Hold the lock so the notification can't race the waiter below.
+            std::unique_lock<std::mutex> lock(mMtx);
+            dead = true;
+            mCv.notify_one();
+        }
+        std::mutex mMtx;
+        std::condition_variable mCv;
+        bool dead = false;
+    };
+
+    // Death recipient needs to have an incoming connection to be called
+    auto proc = createRpcTestSocketServerProcess(
+            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+
+    auto dr = sp<MyDeathRec>::make();
+    ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
+
+    if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
+        EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+    }
+
+    std::unique_lock<std::mutex> lock(dr->mMtx);
+    EXPECT_TRUE(dr->mCv.wait_for(lock, 1000ms, [&]() { return dr->dead; }))
+            << "Failed to receive the death notification.";
+
+    // need to wait for the session to shutdown so we don't "Leak session"
+    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
+    proc.expectAlreadyShutdown = true;
+}
+
+TEST_P(BinderRpc, SingleDeathRecipientOnShutdown) {
+    if (singleThreaded() || !kEnableRpcThreads) {
+        GTEST_SKIP() << "This test requires multiple threads";
+    }
+    class MyDeathRec : public IBinder::DeathRecipient {
+    public:
+        void binderDied(const wp<IBinder>& /* who */) override {
+            // Hold the lock so the notification can't race the waiter below.
+            std::unique_lock<std::mutex> lock(mMtx);
+            dead = true;
+            mCv.notify_one();
+        }
+        std::mutex mMtx;
+        std::condition_variable mCv;
+        bool dead = false;
+    };
+
+    // Death recipient needs to have an incoming connection to be called
+    auto proc = createRpcTestSocketServerProcess(
+            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+
+    auto dr = sp<MyDeathRec>::make();
+    EXPECT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
+
+    // Explicitly calling shutDownAndWait will cause the death recipients
+    // to be called.
+    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
+
+    std::unique_lock<std::mutex> lock(dr->mMtx);
+    EXPECT_TRUE(dr->mCv.wait_for(lock, 1000ms, [&]() { return dr->dead; }))
+            << "Failed to receive the death notification.";
+
+    proc.proc.host.terminate();
+    proc.proc.host.setCustomExitStatusCheck([](int wstatus) {
+        EXPECT_TRUE(WIFSIGNALED(wstatus) && WTERMSIG(wstatus) == SIGTERM)
+                << "server process failed incorrectly: " << WaitStatusToString(wstatus);
+    });
+    proc.expectAlreadyShutdown = true;
+}
+
+TEST_P(BinderRpc, DeathRecipientFatalWithoutIncoming) {
+    class MyDeathRec : public IBinder::DeathRecipient {
+    public:
+        void binderDied(const wp<IBinder>& /* who */) override {}
+    };
+
+    auto proc = createRpcTestSocketServerProcess(
+            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 0});
+
+    auto dr = sp<MyDeathRec>::make();
+    EXPECT_DEATH(proc.rootBinder->linkToDeath(dr, (void*)1, 0),
+                 "Cannot register a DeathRecipient without any incoming connections.");
+}
+
+TEST_P(BinderRpc, UnlinkDeathRecipient) {
+    if (singleThreaded() || !kEnableRpcThreads) {
+        GTEST_SKIP() << "This test requires multiple threads";
+    }
+    class MyDeathRec : public IBinder::DeathRecipient {
+    public:
+        void binderDied(const wp<IBinder>& /* who */) override {
+            GTEST_FAIL() << "This should not be called after unlinkToDeath";
+        }
+    };
+
+    // Death recipient needs to have an incoming connection to be called
+    auto proc = createRpcTestSocketServerProcess(
+            {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+
+    auto dr = sp<MyDeathRec>::make();
+    ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
+    ASSERT_EQ(OK, proc.rootBinder->unlinkToDeath(dr, (void*)1, 0, nullptr));
+
+    if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
+        EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+    }
+
+    // need to wait for the session to shutdown so we don't "Leak session"
+    EXPECT_TRUE(proc.proc.sessions.at(0).session->shutdownAndWait(true));
+    proc.expectAlreadyShutdown = true;
+}
+
 TEST_P(BinderRpc, OnewayCallbackWithNoThread) {
     auto proc = createRpcTestSocketServerProcess({});
     auto cb = sp<MyBinderRpcCallback>::make();