libbinder: RPC explicit check when out of async IDs
There are far more IDs in a uint64_t than could reasonably be used on
a single binder (as the comment added in this CL notes: at 1000 oneway
transactions per second, exhausting them would take 585 million years).
If necessary, we could remove this limit by letting the IDs wrap, which
is sound as long as there are never more than 2**64 simultaneous
transactions to a single binder. However, there are more critical
problems to handle, so the hard limit is left in place for now.
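For reference, a quick sanity check of the 585-million-year figure (an
illustrative snippet, not part of this change):

    #include <cstdio>

    int main() {
        // 2**64 IDs consumed at 1000 oneway transactions per second:
        const double ids = 18446744073709551616.0;  // 2**64
        const double seconds = ids / 1000.0;        // ~1.8e16 seconds
        const double years = seconds / (365.0 * 24 * 60 * 60);
        std::printf("%.0f million years\n", years / 1e6);  // ~585
        return 0;
    }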
Bug: 183140903
Test: binderRpcTest
Change-Id: I5fe33278c965745e31fc0b14a21626a4c680fa70
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 08bf4ec..93f1529 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -134,6 +134,15 @@
 
 void RpcState::dump() {
     std::lock_guard<std::mutex> _l(mNodeMutex);
+    dumpLocked();
+}
+
+void RpcState::terminate() {
+    std::unique_lock<std::mutex> _l(mNodeMutex);
+    terminate(_l);
+}
+
+void RpcState::dumpLocked() {
     ALOGE("DUMP OF RpcState %p", this);
     ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
     for (const auto& [address, node] : mNodeForAddress) {
@@ -161,10 +170,10 @@
ALOGE("END DUMP OF RpcState");
}
-void RpcState::terminate() {
+void RpcState::terminate(std::unique_lock<std::mutex>& lock) {
if (SHOULD_LOG_RPC_DETAIL) {
ALOGE("RpcState::terminate()");
- dump();
+ dumpLocked();
}
// if the destructor of a binder object makes another RPC call, then calling
@@ -172,20 +181,20 @@
     // decStrong could deadlock. So, we must hold onto these binders until
     // mNodeMutex is no longer taken.
     std::vector<sp<IBinder>> tempHoldBinder;
-    {
-        std::lock_guard<std::mutex> _l(mNodeMutex);
-        mTerminated = true;
-        for (auto& [address, node] : mNodeForAddress) {
-            sp<IBinder> binder = node.binder.promote();
-            LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder %p expected to be owned.", binder.get());
+    mTerminated = true;
+    for (auto& [address, node] : mNodeForAddress) {
+        sp<IBinder> binder = node.binder.promote();
+        LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder %p expected to be owned.", binder.get());
 
-            if (node.sentRef != nullptr) {
-                tempHoldBinder.push_back(node.sentRef);
-            }
+        if (node.sentRef != nullptr) {
+            tempHoldBinder.push_back(node.sentRef);
         }
-
-        mNodeForAddress.clear();
     }
+
+    mNodeForAddress.clear();
+
+    lock.unlock();
+    tempHoldBinder.clear(); // explicit
 }
 
 RpcState::CommandData::CommandData(size_t size) : mSize(size) {
@@ -341,14 +350,15 @@
     uint64_t asyncNumber = 0;
 
     if (!address.isZero()) {
-        std::lock_guard<std::mutex> _l(mNodeMutex);
+        std::unique_lock<std::mutex> _l(mNodeMutex);
         if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
 
         auto it = mNodeForAddress.find(address);
         LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Sending transact on unknown address %s",
                             address.toString().c_str());
 
         if (flags & IBinder::FLAG_ONEWAY) {
-            asyncNumber = it->second.asyncNumber++;
+            asyncNumber = it->second.asyncNumber;
+            if (!nodeProgressAsyncNumber(&it->second, _l)) return DEAD_OBJECT;
         }
     }
@@ -697,13 +707,7 @@
             // last refcount dropped after this transaction happened
             if (it == mNodeForAddress.end()) return OK;
 
-            // note - only updated now, instead of later, so that other threads
-            // will queue any later transactions
-
-            // TODO(b/183140903): support > 2**64 async transactions
-            // (we can do this by allowing asyncNumber to wrap, since we
-            // don't expect more than 2**64 simultaneous transactions)
-            it->second.asyncNumber++;
+            if (!nodeProgressAsyncNumber(&it->second, _l)) return DEAD_OBJECT;
 
             if (it->second.asyncTodo.size() == 0) return OK;
             if (it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
@@ -822,4 +826,16 @@
     return ref;
 }
 
+bool RpcState::nodeProgressAsyncNumber(BinderNode* node, std::unique_lock<std::mutex>& lock) {
+    // 2**64 =~ 10**19 =~ 1000 transactions per second for 585 million years to
+    // a single binder
+    if (node->asyncNumber >= std::numeric_limits<decltype(node->asyncNumber)>::max()) {
+        ALOGE("Out of async transaction IDs. Terminating");
+        terminate(lock);
+        return false;
+    }
+    node->asyncNumber++;
+    return true;
+}
+
 } // namespace android
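A standalone sketch of the pattern this change adds, using hypothetical
names (the real helper, RpcState::nodeProgressAsyncNumber, additionally
terminates the RPC session under its lock):

    #include <cstdint>
    #include <limits>

    // Hand out the next async transaction ID, failing explicitly at the
    // ceiling instead of silently wrapping around.
    bool progressAsyncNumber(uint64_t* counter) {
        if (*counter == std::numeric_limits<uint64_t>::max()) {
            return false;  // caller tears down the session (DEAD_OBJECT)
        }
        ++*counter;
        return true;
    }

The wrapping alternative mentioned in the commit message would instead
rely on well-defined unsigned overflow, which stays correct only while
fewer than 2**64 transactions are simultaneously outstanding on one
binder.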