binderRpcTest: use waitpid

Actually reap child processes with waitpid. This gives us a stronger
guarantee (that everything can shut down cleanly on its own) and it
avoids 'kill'. Also log an error if RpcServer shutdown stalls for one
second, and quiet logging for expected connection teardown.
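
For reference, the fork/waitpid pattern this relies on (a minimal
standalone sketch, not the test code itself; the child payload is
elided):

    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdlib>

    int main() {
        pid_t pid = fork();
        if (pid == 0) {
            // child: run the payload (the test runs the RPC server
            // here), then terminate explicitly so the parent can reap
            exit(0);
        }
        // parent: block until the child has actually exited; a hung
        // shutdown now shows up as a hung test instead of being
        // hidden by SIGKILL
        waitpid(pid, nullptr, 0);
        return 0;
    }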

Bug: 186661301
Test: binderRpcTest
Change-Id: If10f00de845eb8097813b4edbf8e2b8ffdc90c5f
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index bff5543..93f9709 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -193,10 +193,11 @@
 
     mShutdownTrigger->trigger();
     while (mJoinThreadRunning || !mConnectingThreads.empty() || !mSessions.empty()) {
-        ALOGI("Waiting for RpcServer to shut down. Join thread running: %d, Connecting threads: "
-              "%zu, Sessions: %zu",
-              mJoinThreadRunning, mConnectingThreads.size(), mSessions.size());
-        mShutdownCv.wait(_l);
+        if (std::cv_status::timeout == mShutdownCv.wait_for(_l, std::chrono::seconds(1))) {
+            ALOGE("Waiting for RpcServer to shut down (1s w/o progress). Join thread running: %d, "
+                  "Connecting threads: "
+                  "%zu, Sessions: %zu. Is your server deadlocked?",
+                  mJoinThreadRunning, mConnectingThreads.size(), mSessions.size());
+        }
     }
 
     // At this point, we know join() is about to exit, but the thread that calls
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
index d5ce324..ac7544f 100644
--- a/libs/binder/RpcSession.cpp
+++ b/libs/binder/RpcSession.cpp
@@ -199,7 +199,8 @@
                 state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));
 
         if (error != OK) {
-            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
+            LOG_RPC_DETAIL("Binder connection thread closing w/ status %s",
+                           statusToString(error).c_str());
             break;
         }
     }
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index d40fef6..1111b82 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -239,8 +239,11 @@
 
     if (status_t status = session->mShutdownTrigger->interruptableReadFully(fd.get(), data, size);
         status != OK) {
-        ALOGE("Failed to read %s (%zu bytes) on fd %d, error: %s", what, size, fd.get(),
-              statusToString(status).c_str());
+        if (status != -ECANCELED) {
+            ALOGE("Failed to read %s (%zu bytes) on fd %d, error: %s", what, size, fd.get(),
+                  statusToString(status).c_str());
+        }
+
         return false;
     }
 
diff --git a/libs/binder/tests/IBinderRpcTest.aidl b/libs/binder/tests/IBinderRpcTest.aidl
index 41daccc..646bcc6 100644
--- a/libs/binder/tests/IBinderRpcTest.aidl
+++ b/libs/binder/tests/IBinderRpcTest.aidl
@@ -55,6 +55,7 @@
     oneway void sleepMsAsync(int ms);
 
     void die(boolean cleanup);
+    void scheduleShutdown();
 
     void useKernelBinderCallingId();
 }
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index 9b2d88d..4d31673 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -194,6 +194,18 @@
             _exit(1);
         }
     }
+
+    Status scheduleShutdown() override {
+        sp<RpcServer> strongServer = server.promote();
+        if (strongServer == nullptr) {
+            return Status::fromExceptionCode(Status::EX_NULL_POINTER);
+        }
+        std::thread([=] {
+            LOG_ALWAYS_FATAL_IF(!strongServer->shutdown(), "Could not shut down");
+        }).detach();
+        return Status::ok();
+    }
+
     Status useKernelBinderCallingId() override {
         // this is WRONG! It does not make sense when using RPC binder, and
         // because it is SO wrong, and so much code calls this, it should abort!
@@ -225,11 +237,13 @@
             prctl(PR_SET_PDEATHSIG, SIGHUP);
 
             f(&mPipe);
+
+            exit(0);
         }
     }
     ~Process() {
         if (mPid != 0) {
-            kill(mPid, SIGKILL);
+            waitpid(mPid, nullptr, 0);
         }
     }
     Pipe* getPipe() { return &mPipe; }
@@ -290,11 +304,11 @@
     sp<IBinderRpcTest> rootIface;
 
     // whether session should be invalidated by end of run
-    bool expectInvalid = false;
+    bool expectAlreadyShutdown = false;
 
     BinderRpcTestProcessSession(BinderRpcTestProcessSession&&) = default;
     ~BinderRpcTestProcessSession() {
-        if (!expectInvalid) {
+        if (!expectAlreadyShutdown) {
             std::vector<int32_t> remoteCounts;
             // calling over any sessions counts across all sessions
             EXPECT_OK(rootIface->countBinders(&remoteCounts));
@@ -302,6 +316,8 @@
             for (auto remoteCount : remoteCounts) {
                 EXPECT_EQ(remoteCount, 1);
             }
+
+            EXPECT_OK(rootIface->scheduleShutdown());
         }
 
         rootIface = nullptr;
@@ -373,6 +389,9 @@
                     configure(server);
 
                     server->join();
+
+                    // Another thread calls shutdown. Wait for it to complete.
+                    (void)server->shutdown();
                 }),
         };
 
@@ -424,15 +443,6 @@
     }
 };
 
-TEST_P(BinderRpc, RootObjectIsNull) {
-    auto proc = createRpcTestSocketServerProcess(1, 1, [](const sp<RpcServer>& server) {
-        // this is the default, but to be explicit
-        server->setRootObject(nullptr);
-    });
-
-    EXPECT_EQ(nullptr, proc.sessions.at(0).root);
-}
-
 TEST_P(BinderRpc, Ping) {
     auto proc = createRpcTestSocketServerProcess(1);
     ASSERT_NE(proc.rootBinder, nullptr);
@@ -900,7 +910,7 @@
         EXPECT_EQ(DEAD_OBJECT, proc.rootIface->die(doDeathCleanup).transactionError())
                 << "Do death cleanup: " << doDeathCleanup;
 
-        proc.expectInvalid = true;
+        proc.expectAlreadyShutdown = true;
     }
 }
 
@@ -914,7 +924,7 @@
     // second time! we catch the error :)
     EXPECT_EQ(DEAD_OBJECT, proc.rootIface->useKernelBinderCallingId().transactionError());
 
-    proc.expectInvalid = true;
+    proc.expectAlreadyShutdown = true;
 }
 
 TEST_P(BinderRpc, WorksWithLibbinderNdkPing) {