| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2020 The Android Open Source Project | 
|  | 3 | * | 
|  | 4 | * Licensed under the Apache License, Version 2.0 (the "License"); | 
|  | 5 | * you may not use this file except in compliance with the License. | 
|  | 6 | * You may obtain a copy of the License at | 
|  | 7 | * | 
|  | 8 | *      http://www.apache.org/licenses/LICENSE-2.0 | 
|  | 9 | * | 
|  | 10 | * Unless required by applicable law or agreed to in writing, software | 
|  | 11 | * distributed under the License is distributed on an "AS IS" BASIS, | 
|  | 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
|  | 13 | * See the License for the specific language governing permissions and | 
|  | 14 | * limitations under the License. | 
|  | 15 | */ | 
|  | 16 |  | 
|  | 17 | #define LOG_TAG "RpcSession" | 
|  | 18 |  | 
|  | 19 | #include <binder/RpcSession.h> | 
|  | 20 |  | 
|  | 21 | #include <inttypes.h> | 
|  | 22 | #include <unistd.h> | 
|  | 23 |  | 
|  | 24 | #include <string_view> | 
|  | 25 |  | 
|  | 26 | #include <binder/Parcel.h> | 
| Steven Moreland | ee78e76 | 2021-05-05 21:12:51 +0000 | [diff] [blame] | 27 | #include <binder/RpcServer.h> | 
| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 28 | #include <binder/Stability.h> | 
|  | 29 | #include <utils/String8.h> | 
|  | 30 |  | 
|  | 31 | #include "RpcSocketAddress.h" | 
|  | 32 | #include "RpcState.h" | 
|  | 33 | #include "RpcWireFormat.h" | 
|  | 34 |  | 
|  | 35 | #ifdef __GLIBC__ | 
|  | 36 | extern "C" pid_t gettid(); | 
|  | 37 | #endif | 
|  | 38 |  | 
|  | 39 | namespace android { | 
|  | 40 |  | 
|  | 41 | using base::unique_fd; | 
|  | 42 |  | 
// Sessions are created via RpcSession::make(); the constructor only
// allocates the per-session RPC state machine (binder/address bookkeeping
// lives in RpcState).
RpcSession::RpcSession() {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mState = std::make_unique<RpcState>();
}
RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    std::lock_guard<std::mutex> _l(mMutex);
    // Server connections hold an sp<> to this session via their serving
    // threads (see startThread/join), so reaching the destructor with any of
    // them still registered indicates a refcounting bug.
    LOG_ALWAYS_FATAL_IF(mServerConnections.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}
|  | 55 |  | 
// Factory: RpcSession is refcounted (sp<>), so instances are always
// heap-allocated through here.
sp<RpcSession> RpcSession::make() {
    return sp<RpcSession>::make();
}
|  | 59 |  | 
// Connects this session as a client to a Unix domain socket at `path`.
// Returns true on success (see setupSocketClient for the full handshake).
bool RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}
|  | 63 |  | 
#ifdef __BIONIC__

// Connects this session as a client over vsock (e.g. host <-> VM traffic).
// Only compiled on bionic, where vsock support is available.
bool RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

#endif // __BIONIC__
|  | 71 |  | 
|  | 72 | bool RpcSession::setupInetClient(const char* addr, unsigned int port) { | 
|  | 73 | auto aiStart = InetSocketAddress::getAddrInfo(addr, port); | 
|  | 74 | if (aiStart == nullptr) return false; | 
|  | 75 | for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) { | 
|  | 76 | InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port); | 
|  | 77 | if (setupSocketClient(socketAddress)) return true; | 
|  | 78 | } | 
|  | 79 | ALOGE("None of the socket address resolved for %s:%u can be added as inet client.", addr, port); | 
|  | 80 | return false; | 
|  | 81 | } | 
|  | 82 |  | 
|  | 83 | bool RpcSession::addNullDebuggingClient() { | 
|  | 84 | unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC))); | 
|  | 85 |  | 
|  | 86 | if (serverFd == -1) { | 
|  | 87 | ALOGE("Could not connect to /dev/null: %s", strerror(errno)); | 
|  | 88 | return false; | 
|  | 89 | } | 
|  | 90 |  | 
|  | 91 | addClient(std::move(serverFd)); | 
|  | 92 | return true; | 
|  | 93 | } | 
|  | 94 |  | 
// Fetches the remote process's root object for this session. The
// ExclusiveConnection reserves a connection for this thread for the duration
// of the call (and may block until one is free).
sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
}
|  | 99 |  | 
// Asks the server how many threads it serves this session with; used by
// setupSocketClient to size the pool of client connections.
status_t RpcSession::getMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
}
|  | 104 |  | 
// Sends a transaction for `address` over this session. Oneway calls are
// tagged CLIENT_ASYNC so connection selection can rotate past a connection
// whose remote thread may still be busy (see ExclusiveConnection).
status_t RpcSession::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
                                   (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                  : ConnectionUse::CLIENT);
    return state()->transact(connection.fd(), address, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}
|  | 113 |  | 
// Tells the remote side to drop one strong reference on `address`. Issued
// with ConnectionUse::CLIENT_REFCOUNT (connection selection semantics live
// in ExclusiveConnection).
status_t RpcSession::sendDecStrong(const RpcAddress& address) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
                                   ConnectionUse::CLIENT_REFCOUNT);
    return state()->sendDecStrong(connection.fd(), address);
}
|  | 119 |  | 
// Queries the server for this session's id and caches it in mId. Client-side
// only (sessions owned by an RpcServer get their id via setForServer).
status_t RpcSession::readId() {
    {
        // Lock only guards the mForServer read; the RPC below must not hold
        // mMutex (ExclusiveConnection takes it itself).
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    int32_t id;

    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    status_t status =
            state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
    // NOTE(review): mId is written without holding mMutex — presumably only
    // reached during single-threaded session setup (setupSocketClient);
    // confirm no concurrent readers at this point.
    mId = id;
    return OK;
}
|  | 137 |  | 
|  | 138 | void RpcSession::startThread(unique_fd client) { | 
|  | 139 | std::lock_guard<std::mutex> _l(mMutex); | 
|  | 140 | sp<RpcSession> holdThis = sp<RpcSession>::fromExisting(this); | 
|  | 141 | int fd = client.release(); | 
|  | 142 | auto thread = std::thread([=] { | 
|  | 143 | holdThis->join(unique_fd(fd)); | 
|  | 144 | { | 
|  | 145 | std::lock_guard<std::mutex> _l(holdThis->mMutex); | 
| Steven Moreland | 2ff0d47 | 2021-05-05 22:20:40 +0000 | [diff] [blame] | 146 | auto it = mThreads.find(std::this_thread::get_id()); | 
|  | 147 | LOG_ALWAYS_FATAL_IF(it == mThreads.end()); | 
|  | 148 | it->second.detach(); | 
|  | 149 | mThreads.erase(it); | 
| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 150 | } | 
|  | 151 | }); | 
|  | 152 | mThreads[thread.get_id()] = std::move(thread); | 
|  | 153 | } | 
|  | 154 |  | 
// Serving loop for one connection: registers the fd as a server connection
// owned by this thread, then executes incoming commands until the connection
// reports an error (which also covers the remote side closing).
void RpcSession::join(unique_fd client) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignServerToThisThread(std::move(client));

    while (true) {
        status_t error =
                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));

        if (error != OK) {
            // Any non-OK status ends this serving thread.
            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
            break;
        }
    }

    // removeServerConnection also triggers session termination when this was
    // the last server connection.
    LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
                        "bad state: connection object guaranteed to be in list");
}
|  | 173 |  | 
// Called with mMutex held when the session is shutting down. Currently only
// notifies the owning RpcServer (if any) so it can drop this session.
void RpcSession::terminateLocked() {
    // TODO(b/185167543):
    // - kindly notify other side of the connection of termination (can't be
    // locked)
    // - prevent new client/servers from being added
    // - stop all threads which are currently reading/writing
    // - terminate RpcState?

    // NOTE(review): mTerminated is read here but never set anywhere visible
    // in this file — confirm it is assigned elsewhere, otherwise this
    // early-return can never fire.
    if (mTerminated) return;

    sp<RpcServer> server = mForServer.promote();
    if (server) {
        server->onSessionTerminating(sp<RpcSession>::fromExisting(this));
    }
}
|  | 189 |  | 
// Returns the RpcServer this session belongs to. mForServer is only set via
// setForServer, so for pure client sessions the weak pointer is empty.
wp<RpcServer> RpcSession::server() {
    return mForServer;
}
|  | 193 |  | 
// Builds the client-side connection pool: one bootstrap connection created
// with RPC_SESSION_ID_NEW, then — after learning the server's thread count
// and our assigned session id — one extra connection per remaining server
// thread, each tagged with that id. Must be called exactly once per session.
bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mClientConnections.size() != 0,
                            "Must only setup session once, but already has %zu clients",
                            mClientConnections.size());
    }

    // Bootstrap connection; the server assigns a session id in response.
    if (!setupOneSocketClient(addr, RPC_SESSION_ID_NEW)) return false;

    // TODO(b/185167543): we should add additional sessions dynamically
    // instead of all at once.
    // TODO(b/186470974): first risk of blocking
    size_t numThreadsAvailable;
    if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
              statusToString(status).c_str());
        return false;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session to %s; %s", addr.toString().c_str(),
              statusToString(status).c_str());
        return false;
    }

    // we've already setup one client
    for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
        // TODO(b/185167543): avoid race w/ accept4 not being called on server
        for (size_t tries = 0; tries < 5; tries++) {
            if (setupOneSocketClient(addr, mId.value())) break;
            usleep(10000);
        }
        // NOTE(review): if all 5 tries fail we silently continue and still
        // return true with a smaller pool — confirm this degradation is
        // intended rather than an error.
    }

    return true;
}
|  | 231 |  | 
// Creates and connects one socket to `addr`, then performs the client side
// of the handshake by writing the 4-byte session id (`id` is
// RPC_SESSION_ID_NEW for the bootstrap connection). On success the fd is
// registered via addClient().
bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
    unique_fd serverFd(
            TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
    if (serverFd == -1) {
        int savedErrno = errno;
        ALOGE("Could not create socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
        return false;
    }

    if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
        int savedErrno = errno;
        ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
        return false;
    }

    // NOTE(review): a short write (< sizeof(id)) is treated as a failure but
    // not retried; TEMP_FAILURE_RETRY only covers EINTR — confirm this is
    // acceptable for the socket families in use.
    if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
        int savedErrno = errno;
        ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
              strerror(savedErrno));
        return false;
    }

    LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());

    addClient(std::move(serverFd));
    return true;
}
|  | 259 |  | 
|  | 260 | void RpcSession::addClient(unique_fd fd) { | 
|  | 261 | std::lock_guard<std::mutex> _l(mMutex); | 
|  | 262 | sp<RpcConnection> session = sp<RpcConnection>::make(); | 
|  | 263 | session->fd = std::move(fd); | 
| Steven Moreland | bb543a8 | 2021-05-11 02:31:50 +0000 | [diff] [blame] | 264 | mClientConnections.push_back(session); | 
| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 265 | } | 
|  | 266 |  | 
// Marks this session as owned by `server` and records the id the server
// assigned it. NOTE(review): no locking here — presumably called before the
// session is visible to other threads; confirm against RpcServer.
void RpcSession::setForServer(const wp<RpcServer>& server, int32_t sessionId) {
    mId = sessionId;
    mForServer = server;
}
|  | 271 |  | 
|  | 272 | sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) { | 
|  | 273 | std::lock_guard<std::mutex> _l(mMutex); | 
|  | 274 | sp<RpcConnection> session = sp<RpcConnection>::make(); | 
|  | 275 | session->fd = std::move(fd); | 
|  | 276 | session->exclusiveTid = gettid(); | 
| Steven Moreland | bb543a8 | 2021-05-11 02:31:50 +0000 | [diff] [blame] | 277 | mServerConnections.push_back(session); | 
| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 278 |  | 
|  | 279 | return session; | 
|  | 280 | } | 
|  | 281 |  | 
|  | 282 | bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) { | 
|  | 283 | std::lock_guard<std::mutex> _l(mMutex); | 
| Steven Moreland | bb543a8 | 2021-05-11 02:31:50 +0000 | [diff] [blame] | 284 | if (auto it = std::find(mServerConnections.begin(), mServerConnections.end(), connection); | 
|  | 285 | it != mServerConnections.end()) { | 
|  | 286 | mServerConnections.erase(it); | 
|  | 287 | if (mServerConnections.size() == 0) { | 
| Steven Moreland | ee78e76 | 2021-05-05 21:12:51 +0000 | [diff] [blame] | 288 | terminateLocked(); | 
|  | 289 | } | 
| Steven Moreland | bdb53ab | 2021-05-05 17:57:41 +0000 | [diff] [blame] | 290 | return true; | 
|  | 291 | } | 
|  | 292 | return false; | 
|  | 293 | } | 
|  | 294 |  | 
// Claims a connection on `session` for the calling thread, blocking on
// mAvailableConnectionCv until one can be taken. Selection priority:
//   1. a connection this thread already holds exclusively (nested call,
//      marked mReentrant so the destructor does not release it),
//   2. an unclaimed client connection,
//   3. (sync calls only) the server connection assigned to this thread.
RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
                                                     ConnectionUse use)
      : mSession(session) {
    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(mSession->mMutex);

    // Count ourselves so releasing threads know whether to notify the CV.
    mSession->mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated session if available
        findConnection(tid, &exclusive, &available, mSession->mClientConnections,
                       mSession->mClientConnectionsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServerConnections is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            mSession->mClientConnectionsOffset =
                    (mSession->mClientConnectionsOffset + 1) % mSession->mClientConnections.size();
        }

        // USE SERVING SOCKET (for nested transaction)
        //
        // asynchronous calls cannot be nested
        if (use != ConnectionUse::CLIENT_ASYNC) {
            // server connections are always assigned to a thread
            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServerConnections,
                           0 /* index hint */);
        }

        // if our thread is already using a session, prioritize using that
        if (exclusive != nullptr) {
            mConnection = exclusive;
            mReentrant = true;
            break;
        } else if (available != nullptr) {
            mConnection = available;
            mConnection->exclusiveTid = tid;
            break;
        }

        // in regular binder, this would usually be a deadlock :)
        LOG_ALWAYS_FATAL_IF(mSession->mClientConnections.size() == 0,
                            "Not a client of any session. You must create a session to an "
                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
                            "calls.");

        LOG_RPC_DETAIL("No available session (have %zu clients and %zu servers). Waiting...",
                       mSession->mClientConnections.size(), mSession->mServerConnections.size());
        // Releases mMutex while waiting; on wakeup all the cases above are
        // re-evaluated from scratch.
        mSession->mAvailableConnectionCv.wait(_l);
    }
    mSession->mWaitingThreads--;
}
|  | 359 |  | 
// Scans `sockets` (starting at `socketsIndexHint`, wrapping around) for:
// - `exclusive`: a connection already held by thread `tid` (nested call) —
//   wins over any `available` candidate and stops the search;
// - `available`: the first connection not held by any thread (only when the
//   caller passed a non-null out-pointer).
// Either out-pointer may already be filled from a previous call; a non-null
// *exclusive short-circuits the whole search.
void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take first available session (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }

        // though, prefer to take session which is already inuse by this thread
        // (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}
|  | 386 |  | 
// Releases the claimed connection (unless this was a reentrant/nested claim)
// and wakes one waiter if any thread is blocked in the constructor.
RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a session means something less deep in the call stack
    // is using this fd, and it retains the right to it. So, we don't give up
    // exclusive ownership, and no thread is freed.
    if (!mReentrant) {
        std::unique_lock<std::mutex> _l(mSession->mMutex);
        mConnection->exclusiveTid = std::nullopt;
        if (mSession->mWaitingThreads > 0) {
            // Unlock before notifying so the woken thread can acquire the
            // mutex immediately instead of waking just to block on it.
            _l.unlock();
            mSession->mAvailableConnectionCv.notify_one();
        }
    }
}
|  | 400 |  | 
|  | 401 | } // namespace android |