Reland "RPC Binder: dropping all binders drops session"
This reverts commit 30a96fcc9ef87244469c93c0dea087008dad58fd.
Bug: 271830568
Fixes: 273486801 - also disabled the clang-tidy check this change was hitting
Test: binderRpcTest
Change-Id: I655cf2c2542b71aaab610cd9cbc5a7d53755178f
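With this change, dropping every binder obtained through an RpcSession causes the session itself to shut down. That is why binderRpcBenchmark now caches the root binders in globals (gRpcBinder, gRpcTlsBinder) instead of re-fetching them per run, and why the new SessionWithIncomingThreadpoolDoesntLeak test polls a weak pointer. A minimal sketch of the restored semantics, assuming a session already connected to a server (the connection step is elided):

    sp<RpcSession> session = RpcSession::make();
    // ... connect to a server, e.g. setupClient(session, addr) as in the benchmark ...
    sp<IBinder> root = session->getRootObject();

    wp<RpcSession> weakSession = session;
    root = nullptr;    // drop the last binder from this session
    session = nullptr; // drop our own strong reference

    // Once the threadpool drains, the session should no longer be promotable
    // (the new test polls for this rather than asserting immediately).
    assert(weakSession.promote() == nullptr);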
diff --git a/libs/binder/tests/binderRpcBenchmark.cpp b/libs/binder/tests/binderRpcBenchmark.cpp
index 52ba9b0..5939273 100644
--- a/libs/binder/tests/binderRpcBenchmark.cpp
+++ b/libs/binder/tests/binderRpcBenchmark.cpp
@@ -102,9 +102,11 @@
}
static sp<RpcSession> gSession = RpcSession::make();
+static sp<IBinder> gRpcBinder;
// Certificate validation happens during handshake and does not affect the result of benchmarks.
// Skip certificate validation to simplify the setup process.
static sp<RpcSession> gSessionTls = RpcSession::make(makeFactoryTls());
+static sp<IBinder> gRpcTlsBinder;
#ifdef __BIONIC__
static const String16 kKernelBinderInstance = String16(u"binderRpcBenchmark-control");
static sp<IBinder> gKernelBinder;
@@ -118,9 +120,9 @@
return gKernelBinder;
#endif
case RPC:
- return gSession->getRootObject();
+ return gRpcBinder;
case RPC_TLS:
- return gSessionTls->getRootObject();
+ return gRpcTlsBinder;
default:
LOG(FATAL) << "Unknown transport value: " << transport;
return nullptr;
@@ -254,11 +256,13 @@
(void)unlink(addr.c_str());
forkRpcServer(addr.c_str(), RpcServer::make(RpcTransportCtxFactoryRaw::make()));
setupClient(gSession, addr.c_str());
+ gRpcBinder = gSession->getRootObject();
std::string tlsAddr = tmp + "/binderRpcTlsBenchmark";
(void)unlink(tlsAddr.c_str());
forkRpcServer(tlsAddr.c_str(), RpcServer::make(makeFactoryTls()));
setupClient(gSessionTls, tlsAddr.c_str());
+ gRpcTlsBinder = gSessionTls->getRootObject();
::benchmark::RunSpecifiedBenchmarks();
return 0;
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index 5952c41..87c84ba 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -163,7 +163,8 @@
session.root = nullptr;
}
- for (auto& info : sessions) {
+ for (size_t sessionNum = 0; sessionNum < sessions.size(); sessionNum++) {
+ auto& info = sessions.at(sessionNum);
sp<RpcSession>& session = info.session;
EXPECT_NE(nullptr, session);
@@ -179,6 +180,7 @@
for (size_t i = 0; i < 3; i++) {
sp<RpcSession> strongSession = weakSession.promote();
EXPECT_EQ(nullptr, strongSession)
+ << "For session " << sessionNum << ". "
<< (debugBacktrace(host.getPid()), debugBacktrace(getpid()),
"Leaked sess: ")
<< strongSession->getStrongCount() << " checked time " << i;
@@ -254,6 +256,10 @@
const BinderRpcOptions& options) {
CHECK_GE(options.numSessions, 1) << "Must have at least one session to a server";
+ if (options.numIncomingConnectionsBySession.size() != 0) {
+ CHECK_EQ(options.numIncomingConnectionsBySession.size(), options.numSessions);
+ }
+
SocketType socketType = std::get<0>(GetParam());
RpcSecurity rpcSecurity = std::get<1>(GetParam());
uint32_t clientVersion = std::get<2>(GetParam());
@@ -351,9 +357,15 @@
status_t status;
- for (const auto& session : sessions) {
+ for (size_t i = 0; i < sessions.size(); i++) {
+ const auto& session = sessions.at(i);
+
+ size_t numIncoming = options.numIncomingConnectionsBySession.size() > 0
+ ? options.numIncomingConnectionsBySession.at(i)
+ : 0;
+
CHECK(session->setProtocolVersion(clientVersion));
- session->setMaxIncomingThreads(options.numIncomingConnections);
+ session->setMaxIncomingThreads(numIncoming);
session->setMaxOutgoingConnections(options.numOutgoingConnections);
session->setFileDescriptorTransportMode(options.clientFileDescriptorTransportMode);
@@ -659,6 +671,32 @@
proc.proc->sessions.erase(proc.proc->sessions.begin() + 1);
}
+TEST_P(BinderRpc, SessionWithIncomingThreadpoolDoesntLeak) {
+ if (clientOrServerSingleThreaded()) {
+ GTEST_SKIP() << "This test requires multiple threads";
+ }
+
+ // session 0 - will check for leaks in the destructor of proc
+ // session 1 - we want to make sure it gets deleted when we drop all references to it
+ auto proc = createRpcTestSocketServerProcess(
+ {.numThreads = 1, .numIncomingConnectionsBySession = {0, 1}, .numSessions = 2});
+
+ wp<RpcSession> session = proc.proc->sessions.at(1).session;
+
+ // remove all references to the second session
+ proc.proc->sessions.at(1).root = nullptr;
+ proc.proc->sessions.erase(proc.proc->sessions.begin() + 1);
+
+ // TODO(b/271830568): find a more efficient way to wait for the other
+ // incoming threadpool to drain its commands.
+ for (size_t i = 0; i < 100; i++) {
+ usleep(10 * 1000);
+ if (session.promote() == nullptr) break;
+ }
+
+ EXPECT_EQ(nullptr, session.promote());
+}
+
TEST_P(BinderRpc, SingleDeathRecipient) {
if (clientOrServerSingleThreaded()) {
GTEST_SKIP() << "This test requires multiple threads";
@@ -676,7 +714,7 @@
// Death recipient needs to have an incoming connection to be called
auto proc = createRpcTestSocketServerProcess(
- {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+ {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
auto dr = sp<MyDeathRec>::make();
ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
@@ -689,6 +727,10 @@
ASSERT_TRUE(dr->mCv.wait_for(lock, 100ms, [&]() { return dr->dead; }));
// need to wait for the session to shutdown so we don't "Leak session"
+ // We can't do this earlier (e.g. by calling forceShutdown before checking
+ // the death recipient) because shutdownAndWait would also trigger the
+ // death recipient. If we had a way to wait for the service to shut down
+ // gracefully, we could use that here instead.
EXPECT_TRUE(proc.proc->sessions.at(0).session->shutdownAndWait(true));
proc.expectAlreadyShutdown = true;
}
@@ -710,7 +752,7 @@
// Death recipient needs to have an incoming connection to be called
auto proc = createRpcTestSocketServerProcess(
- {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+ {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
auto dr = sp<MyDeathRec>::make();
EXPECT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
@@ -743,8 +785,7 @@
void binderDied(const wp<IBinder>& /* who */) override {}
};
- auto proc = createRpcTestSocketServerProcess(
- {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 0});
+ auto proc = createRpcTestSocketServerProcess({.numThreads = 1, .numSessions = 1});
auto dr = sp<MyDeathRec>::make();
EXPECT_EQ(INVALID_OPERATION, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
@@ -763,19 +804,13 @@
// Death recipient needs to have an incoming connection to be called
auto proc = createRpcTestSocketServerProcess(
- {.numThreads = 1, .numSessions = 1, .numIncomingConnections = 1});
+ {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
auto dr = sp<MyDeathRec>::make();
ASSERT_EQ(OK, proc.rootBinder->linkToDeath(dr, (void*)1, 0));
ASSERT_EQ(OK, proc.rootBinder->unlinkToDeath(dr, (void*)1, 0, nullptr));
- if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
- EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
- }
-
- // need to wait for the session to shutdown so we don't "Leak session"
- EXPECT_TRUE(proc.proc->sessions.at(0).session->shutdownAndWait(true));
- proc.expectAlreadyShutdown = true;
+ proc.forceShutdown();
}
TEST_P(BinderRpc, Die) {
diff --git a/libs/binder/tests/binderRpcTestCommon.h b/libs/binder/tests/binderRpcTestCommon.h
index a467ee3..37c43f5 100644
--- a/libs/binder/tests/binderRpcTestCommon.h
+++ b/libs/binder/tests/binderRpcTestCommon.h
@@ -126,7 +126,11 @@
struct BinderRpcOptions {
size_t numThreads = 1;
size_t numSessions = 1;
- size_t numIncomingConnections = 0;
+ // This must either be empty or have length numSessions; each value sets
+ // the number of incoming connections for the corresponding session. We
+ // should probably switch this to a list of per-session options so that
+ // the other options here can also be specified per session.
+ std::vector<size_t> numIncomingConnectionsBySession = {};
size_t numOutgoingConnections = SIZE_MAX;
RpcSession::FileDescriptorTransportMode clientFileDescriptorTransportMode =
RpcSession::FileDescriptorTransportMode::NONE;
diff --git a/libs/binder/tests/binderRpcTestFixture.h b/libs/binder/tests/binderRpcTestFixture.h
index c99d68a..20fb6bf 100644
--- a/libs/binder/tests/binderRpcTestFixture.h
+++ b/libs/binder/tests/binderRpcTestFixture.h
@@ -64,6 +64,21 @@
// whether session should be invalidated by end of run
bool expectAlreadyShutdown = false;
+ // TODO(b/271830568): fix this in binderRpcTest. We always use the first session to make the
+ // remote process shut down. Normally, the default behavior of the destructor is to check
+ // that there are no leaks and then shut down. However, when there are incoming threadpools,
+ // a few extra binder threads remain, so the destructor can't shut down the server. We should
+ // consider an alternative way of structuring the test so that this method isn't needed, e.g.
+ // teaching the destructor about incoming threadpools so that it can intelligently wait for
+ // sessions to shut down (now that dropping all binders causes them to do so).
+ void forceShutdown() {
+ if (auto status = rootIface->scheduleShutdown(); !status.isOk()) {
+ EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
+ }
+ EXPECT_TRUE(proc->sessions.at(0).session->shutdownAndWait(true));
+ expectAlreadyShutdown = true;
+ }
+
BinderRpcTestProcessSession(BinderRpcTestProcessSession&&) = default;
~BinderRpcTestProcessSession() {
if (!expectAlreadyShutdown) {
diff --git a/libs/binder/tests/binderRpcTestTrusty.cpp b/libs/binder/tests/binderRpcTestTrusty.cpp
index 63b56a3..85794bd 100644
--- a/libs/binder/tests/binderRpcTestTrusty.cpp
+++ b/libs/binder/tests/binderRpcTestTrusty.cpp
@@ -60,9 +60,9 @@
// threads.
std::unique_ptr<ProcessSession> BinderRpc::createRpcTestSocketServerProcessEtc(
const BinderRpcOptions& options) {
- LOG_ALWAYS_FATAL_IF(options.numIncomingConnections != 0,
+ LOG_ALWAYS_FATAL_IF(options.numIncomingConnectionsBySession.size() != 0,
"Non-zero incoming connections %zu on Trusty",
- options.numIncomingConnections);
+ options.numIncomingConnectionsBySession.size());
uint32_t clientVersion = std::get<2>(GetParam());
uint32_t serverVersion = std::get<3>(GetParam());
diff --git a/libs/binder/tests/binderRpcUniversalTests.cpp b/libs/binder/tests/binderRpcUniversalTests.cpp
index 11a22b0..1f46010 100644
--- a/libs/binder/tests/binderRpcUniversalTests.cpp
+++ b/libs/binder/tests/binderRpcUniversalTests.cpp
@@ -463,7 +463,7 @@
auto proc = createRpcTestSocketServerProcess(
{.numThreads = 1,
.numSessions = 1,
- .numIncomingConnections = numIncomingConnections});
+ .numIncomingConnectionsBySession = {numIncomingConnections}});
auto cb = sp<MyBinderRpcCallback>::make();
if (callIsOneway) {
@@ -491,16 +491,7 @@
<< "callIsOneway: " << callIsOneway
<< " callbackIsOneway: " << callbackIsOneway << " delayed: " << delayed;
- // since we are severing the connection, we need to go ahead and
- // tell the server to shutdown and exit so that waitpid won't hang
- if (auto status = proc.rootIface->scheduleShutdown(); !status.isOk()) {
- EXPECT_EQ(DEAD_OBJECT, status.transactionError()) << status;
- }
-
- // since this session has an incoming connection w/ a threadpool, we
- // need to manually shut it down
- EXPECT_TRUE(proc.proc->sessions.at(0).session->shutdownAndWait(true));
- proc.expectAlreadyShutdown = true;
+ proc.forceShutdown();
}
}
}
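For reference, the per-session form of the option replaces the old scalar numIncomingConnections at every call site above; a sketch of the updated usage, mirroring the death-recipient tests in this change:

    auto proc = createRpcTestSocketServerProcess(
            {.numThreads = 1, .numSessions = 1, .numIncomingConnectionsBySession = {1}});
    // ... exercise the session, e.g. linkToDeath on proc.rootBinder ...

    // forceShutdown schedules a remote shutdown, waits on the first session,
    // and marks the fixture so the destructor doesn't flag a leaked session.
    proc.forceShutdown();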