/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#define LOG_TAG "RpcSession"
18
19#include <binder/RpcSession.h>
20
21#include <inttypes.h>
22#include <unistd.h>
23
24#include <string_view>
25
26#include <binder/Parcel.h>
Steven Morelandee78e762021-05-05 21:12:51 +000027#include <binder/RpcServer.h>
Steven Morelandbdb53ab2021-05-05 17:57:41 +000028#include <binder/Stability.h>
29#include <utils/String8.h>
30
31#include "RpcSocketAddress.h"
32#include "RpcState.h"
33#include "RpcWireFormat.h"
34
35#ifdef __GLIBC__
36extern "C" pid_t gettid();
37#endif
38
39namespace android {
40
41using base::unique_fd;
42
43RpcSession::RpcSession() {
44 LOG_RPC_DETAIL("RpcSession created %p", this);
45
46 mState = std::make_unique<RpcState>();
47}
48RpcSession::~RpcSession() {
49 LOG_RPC_DETAIL("RpcSession destroyed %p", this);
50
51 std::lock_guard<std::mutex> _l(mMutex);
52 LOG_ALWAYS_FATAL_IF(mServers.size() != 0,
53 "Should not be able to destroy a session with servers in use.");
54}
55
56sp<RpcSession> RpcSession::make() {
57 return sp<RpcSession>::make();
58}
59
60bool RpcSession::setupUnixDomainClient(const char* path) {
61 return setupSocketClient(UnixSocketAddress(path));
62}
63
64#ifdef __BIONIC__
65
66bool RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
67 return setupSocketClient(VsockSocketAddress(cid, port));
68}
69
70#endif // __BIONIC__
71
72bool RpcSession::setupInetClient(const char* addr, unsigned int port) {
73 auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
74 if (aiStart == nullptr) return false;
75 for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
76 InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
77 if (setupSocketClient(socketAddress)) return true;
78 }
79 ALOGE("None of the socket address resolved for %s:%u can be added as inet client.", addr, port);
80 return false;
81}
82
83bool RpcSession::addNullDebuggingClient() {
84 unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));
85
86 if (serverFd == -1) {
87 ALOGE("Could not connect to /dev/null: %s", strerror(errno));
88 return false;
89 }
90
91 addClient(std::move(serverFd));
92 return true;
93}
94
95sp<IBinder> RpcSession::getRootObject() {
96 ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
97 return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
98}
99
100status_t RpcSession::getMaxThreads(size_t* maxThreads) {
101 ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
102 return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
103}
104
105status_t RpcSession::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
106 Parcel* reply, uint32_t flags) {
107 ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
108 (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
109 : ConnectionUse::CLIENT);
110 return state()->transact(connection.fd(), address, code, data,
111 sp<RpcSession>::fromExisting(this), reply, flags);
112}
113
114status_t RpcSession::sendDecStrong(const RpcAddress& address) {
115 ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
116 ConnectionUse::CLIENT_REFCOUNT);
117 return state()->sendDecStrong(connection.fd(), address);
118}
119
120status_t RpcSession::readId() {
121 {
122 std::lock_guard<std::mutex> _l(mMutex);
123 LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
124 }
125
126 int32_t id;
127
128 ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
129 status_t status =
130 state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
131 if (status != OK) return status;
132
133 LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
134 mId = id;
135 return OK;
136}
137
138void RpcSession::startThread(unique_fd client) {
139 std::lock_guard<std::mutex> _l(mMutex);
140 sp<RpcSession> holdThis = sp<RpcSession>::fromExisting(this);
141 int fd = client.release();
142 auto thread = std::thread([=] {
143 holdThis->join(unique_fd(fd));
144 {
145 std::lock_guard<std::mutex> _l(holdThis->mMutex);
Steven Moreland2ff0d472021-05-05 22:20:40 +0000146 auto it = mThreads.find(std::this_thread::get_id());
147 LOG_ALWAYS_FATAL_IF(it == mThreads.end());
148 it->second.detach();
149 mThreads.erase(it);
Steven Morelandbdb53ab2021-05-05 17:57:41 +0000150 }
151 });
152 mThreads[thread.get_id()] = std::move(thread);
153}
154
155void RpcSession::join(unique_fd client) {
156 // must be registered to allow arbitrary client code executing commands to
157 // be able to do nested calls (we can't only read from it)
158 sp<RpcConnection> connection = assignServerToThisThread(std::move(client));
159
160 while (true) {
161 status_t error =
162 state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));
163
164 if (error != OK) {
165 ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
166 break;
167 }
168 }
169
170 LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
171 "bad state: connection object guaranteed to be in list");
172}
173
Steven Morelandee78e762021-05-05 21:12:51 +0000174void RpcSession::terminateLocked() {
175 // TODO(b/185167543):
176 // - kindly notify other side of the connection of termination (can't be
177 // locked)
178 // - prevent new client/servers from being added
179 // - stop all threads which are currently reading/writing
180 // - terminate RpcState?
181
182 if (mTerminated) return;
183
184 sp<RpcServer> server = mForServer.promote();
185 if (server) {
186 server->onSessionTerminating(sp<RpcSession>::fromExisting(this));
187 }
188}
189
Steven Morelandbdb53ab2021-05-05 17:57:41 +0000190wp<RpcServer> RpcSession::server() {
191 return mForServer;
192}
193
194bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
195 {
196 std::lock_guard<std::mutex> _l(mMutex);
197 LOG_ALWAYS_FATAL_IF(mClients.size() != 0,
198 "Must only setup session once, but already has %zu clients",
199 mClients.size());
200 }
201
202 if (!setupOneSocketClient(addr, RPC_SESSION_ID_NEW)) return false;
203
204 // TODO(b/185167543): we should add additional sessions dynamically
205 // instead of all at once.
206 // TODO(b/186470974): first risk of blocking
207 size_t numThreadsAvailable;
208 if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
209 ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
210 statusToString(status).c_str());
211 return false;
212 }
213
214 if (status_t status = readId(); status != OK) {
215 ALOGE("Could not get session id after initial session to %s; %s", addr.toString().c_str(),
216 statusToString(status).c_str());
217 return false;
218 }
219
220 // we've already setup one client
221 for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
222 // TODO(b/185167543): avoid race w/ accept4 not being called on server
223 for (size_t tries = 0; tries < 5; tries++) {
224 if (setupOneSocketClient(addr, mId.value())) break;
225 usleep(10000);
226 }
227 }
228
229 return true;
230}
231
232bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
233 unique_fd serverFd(
234 TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
235 if (serverFd == -1) {
236 int savedErrno = errno;
237 ALOGE("Could not create socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
238 return false;
239 }
240
241 if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
242 int savedErrno = errno;
243 ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
244 return false;
245 }
246
247 if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
248 int savedErrno = errno;
249 ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
250 strerror(savedErrno));
251 return false;
252 }
253
254 LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());
255
256 addClient(std::move(serverFd));
257 return true;
258}
259
260void RpcSession::addClient(unique_fd fd) {
261 std::lock_guard<std::mutex> _l(mMutex);
262 sp<RpcConnection> session = sp<RpcConnection>::make();
263 session->fd = std::move(fd);
264 mClients.push_back(session);
265}
266
267void RpcSession::setForServer(const wp<RpcServer>& server, int32_t sessionId) {
268 mId = sessionId;
269 mForServer = server;
270}
271
272sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
273 std::lock_guard<std::mutex> _l(mMutex);
274 sp<RpcConnection> session = sp<RpcConnection>::make();
275 session->fd = std::move(fd);
276 session->exclusiveTid = gettid();
277 mServers.push_back(session);
278
279 return session;
280}
281
282bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
283 std::lock_guard<std::mutex> _l(mMutex);
284 if (auto it = std::find(mServers.begin(), mServers.end(), connection); it != mServers.end()) {
285 mServers.erase(it);
Steven Morelandee78e762021-05-05 21:12:51 +0000286 if (mServers.size() == 0) {
287 terminateLocked();
288 }
Steven Morelandbdb53ab2021-05-05 17:57:41 +0000289 return true;
290 }
291 return false;
292}
293
// Acquires a connection for exclusive use by the calling thread, blocking on
// mAvailableConnectionCv until one is free. Preference order: a connection
// this thread already holds (nested transaction, marked reentrant), then any
// unclaimed client connection. Fatal if the session has no client connections
// at all (nothing could ever become available).
RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
                                                     ConnectionUse use)
      : mSession(session) {
    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(mSession->mMutex);

    // Count ourselves so the destructor of other ExclusiveConnections knows to
    // notify the condition variable when a connection frees up.
    mSession->mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated session if available
        findConnection(tid, &exclusive, &available, mSession->mClients, mSession->mClientsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServers is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            mSession->mClientsOffset = (mSession->mClientsOffset + 1) % mSession->mClients.size();
        }

        // USE SERVING SOCKET (for nested transaction)
        //
        // asynchronous calls cannot be nested
        if (use != ConnectionUse::CLIENT_ASYNC) {
            // server connections are always assigned to a thread
            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServers,
                           0 /* index hint */);
        }

        // if our thread is already using a session, prioritize using that
        if (exclusive != nullptr) {
            mConnection = exclusive;
            // reentrant: a frame deeper in the stack owns the fd; the
            // destructor must not release exclusivity.
            mReentrant = true;
            break;
        } else if (available != nullptr) {
            mConnection = available;
            // Claim the connection for this thread until ~ExclusiveConnection.
            mConnection->exclusiveTid = tid;
            break;
        }

        // in regular binder, this would usually be a deadlock :)
        LOG_ALWAYS_FATAL_IF(mSession->mClients.size() == 0,
                            "Not a client of any session. You must create a session to an "
                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
                            "calls.");

        LOG_RPC_DETAIL("No available session (have %zu clients and %zu servers). Waiting...",
                       mSession->mClients.size(), mSession->mServers.size());
        // Woken by ~ExclusiveConnection when a connection is released.
        mSession->mAvailableConnectionCv.wait(_l);
    }
    mSession->mWaitingThreads--;
}
356
357void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
358 sp<RpcConnection>* available,
359 std::vector<sp<RpcConnection>>& sockets,
360 size_t socketsIndexHint) {
361 LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
362 "Bad index %zu >= %zu", socketsIndexHint, sockets.size());
363
364 if (*exclusive != nullptr) return; // consistent with break below
365
366 for (size_t i = 0; i < sockets.size(); i++) {
367 sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];
368
369 // take first available session (intuition = caching)
370 if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
371 *available = socket;
372 continue;
373 }
374
375 // though, prefer to take session which is already inuse by this thread
376 // (nested transactions)
377 if (exclusive && socket->exclusiveTid == tid) {
378 *exclusive = socket;
379 break; // consistent with return above
380 }
381 }
382}
383
384RpcSession::ExclusiveConnection::~ExclusiveConnection() {
385 // reentrant use of a session means something less deep in the call stack
386 // is using this fd, and it retains the right to it. So, we don't give up
387 // exclusive ownership, and no thread is freed.
388 if (!mReentrant) {
389 std::unique_lock<std::mutex> _l(mSession->mMutex);
390 mConnection->exclusiveTid = std::nullopt;
391 if (mSession->mWaitingThreads > 0) {
392 _l.unlock();
393 mSession->mAvailableConnectionCv.notify_one();
394 }
395 }
396}
397
398} // namespace android