libbinder: RPC handle built-up refcounts

Generally, in the binder RPC wire protocol, we don't have both the
client and the server writing data into a socket at the same time.
However, in the case of async transactions, this happens in an
unbounded way: a client may send many oneway transactions, and the
server will be sending back refcounting information related to these
transactions (which we process lazily).

To prevent this data from building up, when sending a transaction, if
we're unable to write it immediately, we drain that reference counting
information instead of blocking.

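As a rough sketch of the mechanism (the RpcSession/RpcState plumbing
below is assumed for illustration, not copied from this change), the
send path hands the transport an alternative poll callback; whenever
the socket would block, the transport invokes it, and the callback
drains incoming commands (e.g. 'drainCommands' from the test below)
rather than waiting for writability:

    // Hypothetical send path; only interruptableWriteFully's signature
    // comes from this patch.
    status_t sendTransaction(RpcSession& session, const void* data, size_t size) {
        auto drainRefcounts = [&]() -> status_t {
            // Process queued incoming commands (e.g. decrefs for oneway
            // transactions) so the server's pending writes don't pile up.
            return session.state()->drainCommands(session);
        };
        return session.transport()->interruptableWriteFully(session.fdTrigger(), data,
                                                            size, drainRefcounts);
    }
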
Bug: 182940634
Test: binderRpcTest (no longer deadlocks in OnewayStressTest)
Test: manually checked that 'drainCommands' happens in both the raw and
tls cases during this test (verifying we actually get coverage)
Change-Id: I82039d6188196261b22316e95d8e180c4c33ae73

diff --git a/libs/binder/RpcTransportRaw.cpp b/libs/binder/RpcTransportRaw.cpp
index a22bc6f..7669518 100644
--- a/libs/binder/RpcTransportRaw.cpp
+++ b/libs/binder/RpcTransportRaw.cpp
@@ -46,7 +46,7 @@
template <typename Buffer, typename SendOrReceive>
status_t interruptableReadOrWrite(FdTrigger* fdTrigger, Buffer buffer, size_t size,
SendOrReceive sendOrReceiveFun, const char* funName,
- int16_t event) {
+ int16_t event, const std::function<status_t()>& altPoll) {
const Buffer end = buffer + size;
MAYBE_WAIT_IN_FLAKE_MODE;
@@ -57,9 +57,8 @@
return DEAD_OBJECT;
}
- bool first = true;
- status_t status;
- do {
+ bool havePolled = false;
+ while (true) {
ssize_t processSize = TEMP_FAILURE_RETRY(
sendOrReceiveFun(mSocket.get(), buffer, end - buffer, MSG_NOSIGNAL));
@@ -68,7 +67,8 @@
// Still return the error on later passes, since it would expose
// a problem with polling
- if (!first || (first && savedErrno != EAGAIN && savedErrno != EWOULDBLOCK)) {
+ if (havePolled ||
+ (!havePolled && savedErrno != EAGAIN && savedErrno != EWOULDBLOCK)) {
LOG_RPC_DETAIL("RpcTransport %s(): %s", funName, strerror(savedErrno));
return -savedErrno;
}
@@ -81,19 +81,30 @@
}
}
- if (first) first = false;
- } while ((status = fdTrigger->triggerablePoll(mSocket.get(), event)) == OK);
- return status;
+ if (altPoll) {
+ if (status_t status = altPoll(); status != OK) return status;
+ if (fdTrigger->isTriggered()) {
+ return DEAD_OBJECT;
+ }
+ } else {
+ if (status_t status = fdTrigger->triggerablePoll(mSocket.get(), event);
+ status != OK)
+ return status;
+ if (!havePolled) havePolled = true;
+ }
+ }
}
- status_t interruptableWriteFully(FdTrigger* fdTrigger, const void* data, size_t size) override {
+ status_t interruptableWriteFully(FdTrigger* fdTrigger, const void* data, size_t size,
+ const std::function<status_t()>& altPoll) override {
return interruptableReadOrWrite(fdTrigger, reinterpret_cast<const uint8_t*>(data), size,
- send, "send", POLLOUT);
+ send, "send", POLLOUT, altPoll);
}
- status_t interruptableReadFully(FdTrigger* fdTrigger, void* data, size_t size) override {
+ status_t interruptableReadFully(FdTrigger* fdTrigger, void* data, size_t size,
+ const std::function<status_t()>& altPoll) override {
return interruptableReadOrWrite(fdTrigger, reinterpret_cast<uint8_t*>(data), size, recv,
- "recv", POLLIN);
+ "recv", POLLIN, altPoll);
}
private:
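
For illustration, a caller can now choose between the two polling modes
('transport', 'fdTrigger', 'buf', 'len', and 'drainPendingCommands' are
stand-ins, not names from this patch):

    // Default mode: poll the socket itself, as before.
    status_t s1 = transport->interruptableWriteFully(fdTrigger, buf, len, nullptr);

    // altPoll mode: when the write would block, run the callback instead
    // of polling for POLLOUT, e.g. to drain queued refcount commands.
    status_t s2 = transport->interruptableWriteFully(fdTrigger, buf, len,
                                                     [&] { return drainPendingCommands(); });

Note that the altPoll branch never sets 'havePolled', so an
EAGAIN/EWOULDBLOCK after the callback keeps looping through altPoll
instead of being reported as an error; only the socket-polling branch
arms the later-pass error return.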