/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RpcSession"

#include <binder/RpcSession.h>

#include <inttypes.h>
#include <unistd.h>

#include <string_view>

#include <binder/Parcel.h>
#include <binder/Stability.h>
#include <utils/String8.h>

#include "RpcSocketAddress.h"
#include "RpcState.h"
#include "RpcWireFormat.h"

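// Bionic declares gettid() in <unistd.h>, but glibc historically did not
// (a wrapper only exists since glibc 2.30), so declare the wrapper here for
// host builds.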
#ifdef __GLIBC__
extern "C" pid_t gettid();
#endif

namespace android {

using base::unique_fd;

RpcSession::RpcSession() {
    LOG_RPC_DETAIL("RpcSession created %p", this);

    mState = std::make_unique<RpcState>();
}

RpcSession::~RpcSession() {
    LOG_RPC_DETAIL("RpcSession destroyed %p", this);

    std::lock_guard<std::mutex> _l(mMutex);
    LOG_ALWAYS_FATAL_IF(mServers.size() != 0,
                        "Should not be able to destroy a session with servers in use.");
}

sp<RpcSession> RpcSession::make() {
    return sp<RpcSession>::make();
}

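// A minimal client-side usage sketch (hypothetical socket path; assumes a
// server is already listening there; error handling elided):
//
//     sp<RpcSession> session = RpcSession::make();
//     if (!session->setupUnixDomainClient("/dev/some_service.sock")) abort();
//     sp<IBinder> root = session->getRootObject();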
bool RpcSession::setupUnixDomainClient(const char* path) {
    return setupSocketClient(UnixSocketAddress(path));
}

#ifdef __BIONIC__

bool RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
    return setupSocketClient(VsockSocketAddress(cid, port));
}

#endif // __BIONIC__

bool RpcSession::setupInetClient(const char* addr, unsigned int port) {
    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
    if (aiStart == nullptr) return false;
    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
        if (setupSocketClient(socketAddress)) return true;
    }
    ALOGE("None of the socket addresses resolved for %s:%u could be set up as an inet client.",
          addr, port);
    return false;
}

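// Attaches this session to /dev/null instead of a real server: everything
// written is discarded and nothing can be read back, so this is only useful
// for debugging the client side of the wire protocol.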
bool RpcSession::addNullDebuggingClient() {
    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));

    if (serverFd == -1) {
        ALOGE("Could not open /dev/null: %s", strerror(errno));
        return false;
    }

    addClient(std::move(serverFd));
    return true;
}

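// Fetches the object the server registered as the root of the session; this
// is typically the entry point from which all other remote binders are
// obtained.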
sp<IBinder> RpcSession::getRootObject() {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
}

status_t RpcSession::getMaxThreads(size_t* maxThreads) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
}

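// Oneway calls are routed as CLIENT_ASYNC; ExclusiveConnection rotates
// mClientsOffset after each asynchronous use so that a later synchronous call
// does not land on a connection that may still be busy processing the
// asynchronous command.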
status_t RpcSession::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
                              Parcel* reply, uint32_t flags) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
                                   (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
                                                                  : ConnectionUse::CLIENT);
    return state()->transact(connection.fd(), address, code, data,
                             sp<RpcSession>::fromExisting(this), reply, flags);
}

status_t RpcSession::sendDecStrong(const RpcAddress& address) {
    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
                                   ConnectionUse::CLIENT_REFCOUNT);
    return state()->sendDecStrong(connection.fd(), address);
}

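// Queries the server for the ID it assigned to this session; additional
// connections opened in setupSocketClient() present this ID so that the
// server can attach them to the same session.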
status_t RpcSession::readId() {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
    }

    int32_t id;

    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
    status_t status =
            state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
    if (status != OK) return status;

    LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
    mId = id;
    return OK;
}

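// Spawns a thread which services incoming transactions on 'client' until the
// connection dies, then removes itself from mThreads.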
void RpcSession::startThread(unique_fd client) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcSession> holdThis = sp<RpcSession>::fromExisting(this);
    int fd = client.release();
    auto thread = std::thread([=] {
        holdThis->join(unique_fd(fd));
        {
            std::lock_guard<std::mutex> _l(holdThis->mMutex);
            auto it = holdThis->mThreads.find(std::this_thread::get_id());
            LOG_ALWAYS_FATAL_IF(it == holdThis->mThreads.end(), "Could not erase thread.");
            // detach before erasing: destroying a joinable std::thread calls
            // std::terminate()
            it->second.detach();
            holdThis->mThreads.erase(it);
        }
    });
    mThreads[thread.get_id()] = std::move(thread);
}

void RpcSession::join(unique_fd client) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignServerToThisThread(std::move(client));

    while (true) {
        status_t error =
                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));

        if (error != OK) {
            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
            break;
        }
    }

    LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
                        "bad state: connection object guaranteed to be in list");
}

wp<RpcServer> RpcSession::server() {
    return mForServer;
}

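// Client-side session setup happens in three steps: (1) open an initial
// connection requesting a new session (RPC_SESSION_ID_NEW), (2) query the
// server's max thread count and the assigned session ID over that connection,
// and (3) open one extra connection per remaining server thread, each tagged
// with that session ID.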
bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
    {
        std::lock_guard<std::mutex> _l(mMutex);
        LOG_ALWAYS_FATAL_IF(mClients.size() != 0,
                            "Must only set up session once, but already has %zu clients",
                            mClients.size());
    }

    if (!setupOneSocketClient(addr, RPC_SESSION_ID_NEW)) return false;

    // TODO(b/185167543): we should add additional sessions dynamically
    // instead of all at once.
    // TODO(b/186470974): first risk of blocking
    size_t numThreadsAvailable;
    if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
        ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
              statusToString(status).c_str());
        return false;
    }

    if (status_t status = readId(); status != OK) {
        ALOGE("Could not get session id after initial session to %s: %s", addr.toString().c_str(),
              statusToString(status).c_str());
        return false;
    }

    // we've already set up one client
    for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
        // TODO(b/185167543): avoid race w/ accept4 not being called on server
        for (size_t tries = 0; tries < 5; tries++) {
            if (setupOneSocketClient(addr, mId.value())) break;
            usleep(10000);
        }
    }

    return true;
}

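// The client-side handshake is a single write: immediately after connect(),
// send the 4-byte session ID (or RPC_SESSION_ID_NEW to request a fresh one).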
bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
    unique_fd serverFd(
            TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
    if (serverFd == -1) {
        int savedErrno = errno;
        ALOGE("Could not create socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
        return false;
    }

    if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
        int savedErrno = errno;
        ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
        return false;
    }

    if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
        int savedErrno = errno;
        ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
              strerror(savedErrno));
        return false;
    }

    LOG_RPC_DETAIL("Socket client at %s with fd %d", addr.toString().c_str(), serverFd.get());

    addClient(std::move(serverFd));
    return true;
}

void RpcSession::addClient(unique_fd fd) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    connection->fd = std::move(fd);
    mClients.push_back(connection);
}

void RpcSession::setForServer(const wp<RpcServer>& server, int32_t sessionId) {
    mId = sessionId;
    mForServer = server;
}

sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
    std::lock_guard<std::mutex> _l(mMutex);
    sp<RpcConnection> connection = sp<RpcConnection>::make();
    connection->fd = std::move(fd);
    connection->exclusiveTid = gettid();
    mServers.push_back(connection);

    return connection;
}

bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
    std::lock_guard<std::mutex> _l(mMutex);
    if (auto it = std::find(mServers.begin(), mServers.end(), connection); it != mServers.end()) {
        mServers.erase(it);
        return true;
    }
    return false;
}

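// Claims a connection for this thread's exclusive use for the duration of a
// call, blocking on mAvailableConnectionCv until one is free. A connection
// this thread already holds (a nested call) is preferred over the first
// unclaimed client connection.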
RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
                                                     ConnectionUse use)
      : mSession(session) {
    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(mSession->mMutex);

    mSession->mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated connection if available
        findConnection(tid, &exclusive, &available, mSession->mClients, mSession->mClientsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServers is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we rotate to the next available connection for
        // subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            mSession->mClientsOffset = (mSession->mClientsOffset + 1) % mSession->mClients.size();
        }

        // USE SERVING SOCKET (for nested transaction)
        //
        // asynchronous calls cannot be nested
        if (use != ConnectionUse::CLIENT_ASYNC) {
            // server connections are always assigned to a thread
            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServers,
                           0 /* index hint */);
        }

        // if our thread is already using a connection, prioritize using that
        if (exclusive != nullptr) {
            mConnection = exclusive;
            mReentrant = true;
            break;
        } else if (available != nullptr) {
            mConnection = available;
            mConnection->exclusiveTid = tid;
            break;
        }

        // in regular binder, this would usually be a deadlock :)
        LOG_ALWAYS_FATAL_IF(mSession->mClients.size() == 0,
                            "Not a client of any session. You must create a session to an "
                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
                            "calls.");

        LOG_RPC_DETAIL("No available connection (have %zu clients and %zu servers). Waiting...",
                       mSession->mClients.size(), mSession->mServers.size());
        mSession->mAvailableConnectionCv.wait(_l);
    }
    mSession->mWaitingThreads--;
}

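// Scans 'sockets' starting from socketsIndexHint: records the first unclaimed
// connection in *available, and a connection already claimed by 'tid' in
// *exclusive; an exclusive match wins and terminates the scan.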
void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
                                                     sp<RpcConnection>* available,
                                                     std::vector<sp<RpcConnection>>& sockets,
                                                     size_t socketsIndexHint) {
    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());

    if (*exclusive != nullptr) return; // consistent with break below

    for (size_t i = 0; i < sockets.size(); i++) {
        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];

        // take the first available connection (intuition = caching)
        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
            *available = socket;
            continue;
        }

        // though, prefer to take a connection which is already in use by this
        // thread (nested transactions)
        if (exclusive && socket->exclusiveTid == tid) {
            *exclusive = socket;
            break; // consistent with return above
        }
    }
}

RpcSession::ExclusiveConnection::~ExclusiveConnection() {
    // reentrant use of a connection means something less deep in the call
    // stack is using this fd, and it retains the right to it. So, we don't
    // give up exclusive ownership, and no thread is freed.
    if (!mReentrant) {
        std::unique_lock<std::mutex> _l(mSession->mMutex);
        mConnection->exclusiveTid = std::nullopt;
        if (mSession->mWaitingThreads > 0) {
            _l.unlock();
            mSession->mAvailableConnectionCv.notify_one();
        }
    }
}

} // namespace android