blob: 3b3adca34fce05af34f00e401de830fbe37e7c18 [file] [log] [blame]
Steven Moreland5553ac42020-11-11 02:14:45 +00001/*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "RpcState"
18
19#include "RpcState.h"
20
#include <binder/BpBinder.h>
#include <binder/RpcServer.h>

#include "Debug.h"
#include "RpcWireFormat.h"

#include <inttypes.h>

#include <memory>
28
29namespace android {
30
31RpcState::RpcState() {}
32RpcState::~RpcState() {}
33
34status_t RpcState::onBinderLeaving(const sp<RpcConnection>& connection, const sp<IBinder>& binder,
35 RpcAddress* outAddress) {
36 bool isRemote = binder->remoteBinder();
37 bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();
38
39 if (isRpc && binder->remoteBinder()->getPrivateAccessorForId().rpcConnection() != connection) {
40 // We need to be able to send instructions over the socket for how to
41 // connect to a different server, and we also need to let the host
42 // process know that this is happening.
43 ALOGE("Canot send binder from unrelated binder RPC connection.");
44 return INVALID_OPERATION;
45 }
46
47 if (isRemote && !isRpc) {
48 // Without additional work, this would have the effect of using this
49 // process to proxy calls from the socket over to the other process, and
50 // it would make those calls look like they come from us (not over the
51 // sockets). In order to make this work transparently like binder, we
52 // would instead need to send instructions over the socket for how to
53 // connect to the host process, and we also need to let the host process
54 // know this was happening.
55 ALOGE("Cannot send binder proxy %p over sockets", binder.get());
56 return INVALID_OPERATION;
57 }
58
59 std::lock_guard<std::mutex> _l(mNodeMutex);
60
61 // TODO(b/182939933): maybe move address out of BpBinder, and keep binder->address map
62 // in RpcState
63 for (auto& [addr, node] : mNodeForAddress) {
64 if (binder == node.binder) {
65 if (isRpc) {
66 const RpcAddress& actualAddr =
67 binder->remoteBinder()->getPrivateAccessorForId().rpcAddress();
68 // TODO(b/182939933): this is only checking integrity of data structure
69 // a different data structure doesn't need this
70 LOG_ALWAYS_FATAL_IF(addr < actualAddr, "Address mismatch");
71 LOG_ALWAYS_FATAL_IF(actualAddr < addr, "Address mismatch");
72 }
73 node.timesSent++;
74 node.sentRef = binder; // might already be set
75 *outAddress = addr;
76 return OK;
77 }
78 }
79 LOG_ALWAYS_FATAL_IF(isRpc, "RPC binder must have known address at this point");
80
81 auto&& [it, inserted] = mNodeForAddress.insert({RpcAddress::unique(),
82 BinderNode{
83 .binder = binder,
84 .timesSent = 1,
85 .sentRef = binder,
86 }});
87 // TODO(b/182939933): better organization could avoid needing this log
88 LOG_ALWAYS_FATAL_IF(!inserted);
89
90 *outAddress = it->first;
91 return OK;
92}
93
// Records that a binder identified by 'address' arrived over 'connection' and
// returns the object (or proxy) representing it.
//
// Each arrival carries one implicit strong RPC refcount from the sender. If
// the address is already tracked, we keep only our single existing refcount
// and immediately return the extra one via sendDecStrong; otherwise a new
// BpBinder proxy is created which keeps the refcount (timesRecd = 1) until
// BpBinder::onLastDecStrong.
sp<IBinder> RpcState::onBinderEntering(const sp<RpcConnection>& connection,
                                       const RpcAddress& address) {
    std::unique_lock<std::mutex> _l(mNodeMutex);

    if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
        // NOTE(review): promote() may fail if the proxy is being destroyed
        // concurrently, in which case this returns nullptr — confirm callers
        // handle a null result.
        sp<IBinder> binder = it->second.binder.promote();

        // implicitly have strong RPC refcount, since we received this binder
        it->second.timesRecd++;

        // Drop the lock before making a nested call on the connection below.
        _l.unlock();

        // We have timesRecd RPC refcounts, but we only need to hold on to one
        // when we keep the object. All additional dec strongs are sent
        // immediately, we wait to send the last one in BpBinder::onLastDecStrong.
        (void)connection->sendDecStrong(address);

        return binder;
    }

    auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
    LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");

    // Currently, all binders are assumed to be part of the same connection (no
    // device global binders in the RPC world).
    sp<IBinder> binder = BpBinder::create(connection, it->first);
    it->second.binder = binder;
    it->second.timesRecd = 1;
    return binder;
}
124
125size_t RpcState::countBinders() {
126 std::lock_guard<std::mutex> _l(mNodeMutex);
127 return mNodeForAddress.size();
128}
129
130void RpcState::dump() {
131 std::lock_guard<std::mutex> _l(mNodeMutex);
132 ALOGE("DUMP OF RpcState %p", this);
133 ALOGE("DUMP OF RpcState (%zu nodes)", mNodeForAddress.size());
134 for (const auto& [address, node] : mNodeForAddress) {
135 sp<IBinder> binder = node.binder.promote();
136
137 const char* desc;
138 if (binder) {
139 if (binder->remoteBinder()) {
140 if (binder->remoteBinder()->isRpcBinder()) {
141 desc = "(rpc binder proxy)";
142 } else {
143 desc = "(binder proxy)";
144 }
145 } else {
146 desc = "(local binder)";
147 }
148 } else {
149 desc = "(null)";
150 }
151
152 ALOGE("- BINDER NODE: %p times sent:%zu times recd: %zu a:%s type:%s",
153 node.binder.unsafe_get(), node.timesSent, node.timesRecd, address.toString().c_str(),
154 desc);
155 }
156 ALOGE("END DUMP OF RpcState");
157}
158
// Tears down all binder bookkeeping for this connection. After this call,
// transact/sendDecStrong observe mTerminated and return DEAD_OBJECT instead
// of hitting fatal asserts.
void RpcState::terminate() {
    if (SHOULD_LOG_RPC_DETAIL) {
        ALOGE("RpcState::terminate()");
        dump();
    }

    // if the destructor of a binder object makes another RPC call, then calling
    // decStrong could deadlock. So, we must hold onto these binders until
    // mNodeMutex is no longer taken.
    std::vector<sp<IBinder>> tempHoldBinder;

    {
        std::lock_guard<std::mutex> _l(mNodeMutex);
        mTerminated = true;
        for (auto& [address, node] : mNodeForAddress) {
            sp<IBinder> binder = node.binder.promote();
            LOG_ALWAYS_FATAL_IF(binder == nullptr, "Binder %p expected to be owned.", binder.get());

            // sentRef is the strong ref held on behalf of the remote side.
            if (node.sentRef != nullptr) {
                tempHoldBinder.push_back(node.sentRef);
            }
        }

        mNodeForAddress.clear();
    }
    // tempHoldBinder releases its contents here, outside the lock.
}
185
186bool RpcState::rpcSend(const base::unique_fd& fd, const char* what, const void* data, size_t size) {
187 LOG_RPC_DETAIL("Sending %s on fd %d: %s", what, fd.get(), hexString(data, size).c_str());
188
189 if (size > std::numeric_limits<ssize_t>::max()) {
190 ALOGE("Cannot send %s at size %zu (too big)", what, size);
191 terminate();
192 return false;
193 }
194
Steven Morelandc6ddf362021-04-02 01:13:36 +0000195 ssize_t sent = TEMP_FAILURE_RETRY(send(fd.get(), data, size, MSG_NOSIGNAL));
Steven Moreland5553ac42020-11-11 02:14:45 +0000196
197 if (sent < 0 || sent != static_cast<ssize_t>(size)) {
198 ALOGE("Failed to send %s (sent %zd of %zu bytes) on fd %d, error: %s", what, sent, size,
199 fd.get(), strerror(errno));
200
201 terminate();
202 return false;
203 }
204
205 return true;
206}
207
208bool RpcState::rpcRec(const base::unique_fd& fd, const char* what, void* data, size_t size) {
209 if (size > std::numeric_limits<ssize_t>::max()) {
210 ALOGE("Cannot rec %s at size %zu (too big)", what, size);
211 terminate();
212 return false;
213 }
214
Steven Morelandc6ddf362021-04-02 01:13:36 +0000215 ssize_t recd = TEMP_FAILURE_RETRY(recv(fd.get(), data, size, MSG_WAITALL | MSG_NOSIGNAL));
Steven Moreland5553ac42020-11-11 02:14:45 +0000216
217 if (recd < 0 || recd != static_cast<ssize_t>(size)) {
218 terminate();
219
220 if (recd == 0 && errno == 0) {
221 LOG_RPC_DETAIL("No more data when trying to read %s on fd %d", what, fd.get());
222 return false;
223 }
224
225 ALOGE("Failed to read %s (received %zd of %zu bytes) on fd %d, error: %s", what, recd, size,
226 fd.get(), strerror(errno));
227 return false;
228 } else {
229 LOG_RPC_DETAIL("Received %s on fd %d: %s", what, fd.get(), hexString(data, size).c_str());
230 }
231
232 return true;
233}
234
235sp<IBinder> RpcState::getRootObject(const base::unique_fd& fd,
236 const sp<RpcConnection>& connection) {
237 Parcel data;
238 data.markForRpc(connection);
239 Parcel reply;
240
241 status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_ROOT, data,
242 connection, &reply, 0);
243 if (status != OK) {
244 ALOGE("Error getting root object: %s", statusToString(status).c_str());
245 return nullptr;
246 }
247
248 return reply.readStrongBinder();
249}
250
// Sends a transaction to 'address' (or the reserved zero address for special
// server commands) over 'fd'. For non-oneway calls, blocks in waitForReply;
// for FLAG_ONEWAY, returns immediately after sending ('reply' may then be
// null).
//
// @return OK on success; DEAD_OBJECT if the connection is terminated or a
//         send fails; BAD_TYPE for parcels not crafted for RPC or carrying
//         objects; BAD_VALUE for oversized payloads.
status_t RpcState::transact(const base::unique_fd& fd, const RpcAddress& address, uint32_t code,
                            const Parcel& data, const sp<RpcConnection>& connection, Parcel* reply,
                            uint32_t flags) {
    uint64_t asyncNumber = 0;

    if (!address.isZero()) {
        std::lock_guard<std::mutex> _l(mNodeMutex);
        if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
        auto it = mNodeForAddress.find(address);
        LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Sending transact on unknown address %s",
                            address.toString().c_str());

        if (flags & IBinder::FLAG_ONEWAY) {
            // Number oneway calls so the receiving side can execute them in
            // submission order (see processTransactInternal).
            asyncNumber = it->second.asyncNumber++;
        }
    }

    if (!data.isForRpc()) {
        ALOGE("Refusing to send RPC with parcel not crafted for RPC");
        return BAD_TYPE;
    }

    if (data.objectsCount() != 0) {
        ALOGE("Parcel at %p has attached objects but is being used in an RPC call", &data);
        return BAD_TYPE;
    }

    RpcWireTransaction transaction{
            .address = address.viewRawEmbedded(),
            .code = code,
            .flags = flags,
            .asyncNumber = asyncNumber,
    };

    // Wire layout: RpcWireTransaction header immediately followed by the
    // parcel's raw data bytes.
    std::vector<uint8_t> transactionData(sizeof(RpcWireTransaction) + data.dataSize());
    memcpy(transactionData.data() + 0, &transaction, sizeof(RpcWireTransaction));
    memcpy(transactionData.data() + sizeof(RpcWireTransaction), data.data(), data.dataSize());

    if (transactionData.size() > std::numeric_limits<uint32_t>::max()) {
        // bodySize is a uint32_t on the wire.
        ALOGE("Transaction size too big %zu", transactionData.size());
        return BAD_VALUE;
    }

    RpcWireHeader command{
            .command = RPC_COMMAND_TRANSACT,
            .bodySize = static_cast<uint32_t>(transactionData.size()),
    };

    if (!rpcSend(fd, "transact header", &command, sizeof(command))) {
        return DEAD_OBJECT;
    }
    if (!rpcSend(fd, "command body", transactionData.data(), transactionData.size())) {
        return DEAD_OBJECT;
    }

    if (flags & IBinder::FLAG_ONEWAY) {
        return OK; // do not wait for result
    }

    LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");

    return waitForReply(fd, connection, reply);
}
314
315static void cleanup_data(Parcel* p, const uint8_t* data, size_t dataSize,
316 const binder_size_t* objects, size_t objectsCount) {
317 (void)p;
318 delete[] const_cast<uint8_t*>(data - offsetof(RpcWireReply, data));
319 (void)dataSize;
320 LOG_ALWAYS_FATAL_IF(objects != nullptr);
321 LOG_ALWAYS_FATAL_IF(objectsCount, 0);
322}
323
324status_t RpcState::waitForReply(const base::unique_fd& fd, const sp<RpcConnection>& connection,
325 Parcel* reply) {
326 RpcWireHeader command;
327 while (true) {
328 if (!rpcRec(fd, "command header", &command, sizeof(command))) {
329 return DEAD_OBJECT;
330 }
331
332 if (command.command == RPC_COMMAND_REPLY) break;
333
334 status_t status = processServerCommand(fd, connection, command);
335 if (status != OK) return status;
336 }
337
338 uint8_t* data = new uint8_t[command.bodySize];
339
340 if (!rpcRec(fd, "reply body", data, command.bodySize)) {
341 return DEAD_OBJECT;
342 }
343
344 if (command.bodySize < sizeof(RpcWireReply)) {
345 ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireReply. Terminating!",
346 sizeof(RpcWireReply), command.bodySize);
347 terminate();
348 return BAD_VALUE;
349 }
350 RpcWireReply* rpcReply = reinterpret_cast<RpcWireReply*>(data);
351 if (rpcReply->status != OK) return rpcReply->status;
352
353 reply->ipcSetDataReference(rpcReply->data, command.bodySize - offsetof(RpcWireReply, data),
354 nullptr, 0, cleanup_data);
355
356 reply->markForRpc(connection);
357
358 return OK;
359}
360
361status_t RpcState::sendDecStrong(const base::unique_fd& fd, const RpcAddress& addr) {
362 {
363 std::lock_guard<std::mutex> _l(mNodeMutex);
364 if (mTerminated) return DEAD_OBJECT; // avoid fatal only, otherwise races
365 auto it = mNodeForAddress.find(addr);
366 LOG_ALWAYS_FATAL_IF(it == mNodeForAddress.end(), "Sending dec strong on unknown address %s",
367 addr.toString().c_str());
368 LOG_ALWAYS_FATAL_IF(it->second.timesRecd <= 0, "Bad dec strong %s",
369 addr.toString().c_str());
370
371 it->second.timesRecd--;
372 if (it->second.timesRecd == 0 && it->second.timesSent == 0) {
373 mNodeForAddress.erase(it);
374 }
375 }
376
377 RpcWireHeader cmd = {
378 .command = RPC_COMMAND_DEC_STRONG,
379 .bodySize = sizeof(RpcWireAddress),
380 };
381 if (!rpcSend(fd, "dec ref header", &cmd, sizeof(cmd))) return DEAD_OBJECT;
382 if (!rpcSend(fd, "dec ref body", &addr.viewRawEmbedded(), sizeof(RpcWireAddress)))
383 return DEAD_OBJECT;
384 return OK;
385}
386
387status_t RpcState::getAndExecuteCommand(const base::unique_fd& fd,
388 const sp<RpcConnection>& connection) {
389 LOG_RPC_DETAIL("getAndExecuteCommand on fd %d", fd.get());
390
391 RpcWireHeader command;
392 if (!rpcRec(fd, "command header", &command, sizeof(command))) {
393 return DEAD_OBJECT;
394 }
395
396 return processServerCommand(fd, connection, command);
397}
398
399status_t RpcState::processServerCommand(const base::unique_fd& fd,
400 const sp<RpcConnection>& connection,
401 const RpcWireHeader& command) {
402 switch (command.command) {
403 case RPC_COMMAND_TRANSACT:
404 return processTransact(fd, connection, command);
405 case RPC_COMMAND_DEC_STRONG:
406 return processDecStrong(fd, command);
407 }
408
409 // We should always know the version of the opposing side, and since the
410 // RPC-binder-level wire protocol is not self synchronizing, we have no way
411 // to understand where the current command ends and the next one begins. We
412 // also can't consider it a fatal error because this would allow any client
413 // to kill us, so ending the connection for misbehaving client.
414 ALOGE("Unknown RPC command %d - terminating connection", command.command);
415 terminate();
416 return DEAD_OBJECT;
417}
418status_t RpcState::processTransact(const base::unique_fd& fd, const sp<RpcConnection>& connection,
419 const RpcWireHeader& command) {
420 LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
421
422 std::vector<uint8_t> transactionData(command.bodySize);
423 if (!rpcRec(fd, "transaction body", transactionData.data(), transactionData.size())) {
424 return DEAD_OBJECT;
425 }
426
427 return processTransactInternal(fd, connection, std::move(transactionData));
428}
429
430status_t RpcState::processTransactInternal(const base::unique_fd& fd,
431 const sp<RpcConnection>& connection,
432 std::vector<uint8_t>&& transactionData) {
433 if (transactionData.size() < sizeof(RpcWireTransaction)) {
434 ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
435 sizeof(RpcWireTransaction), transactionData.size());
436 terminate();
437 return BAD_VALUE;
438 }
439 RpcWireTransaction* transaction = reinterpret_cast<RpcWireTransaction*>(transactionData.data());
440
441 // TODO(b/182939933): heap allocation just for lookup in mNodeForAddress,
442 // maybe add an RpcAddress 'view' if the type remains 'heavy'
443 auto addr = RpcAddress::fromRawEmbedded(&transaction->address);
444
445 status_t replyStatus = OK;
446 sp<IBinder> target;
447 if (!addr.isZero()) {
448 std::lock_guard<std::mutex> _l(mNodeMutex);
449
450 auto it = mNodeForAddress.find(addr);
451 if (it == mNodeForAddress.end()) {
452 ALOGE("Unknown binder address %s.", addr.toString().c_str());
453 dump();
454 replyStatus = BAD_VALUE;
455 } else {
456 target = it->second.binder.promote();
457 if (target == nullptr) {
458 // This can happen if the binder is remote in this process, and
459 // another thread has called the last decStrong on this binder.
460 // However, for local binders, it indicates a misbehaving client
461 // (any binder which is being transacted on should be holding a
462 // strong ref count), so in either case, terminating the
463 // connection.
464 ALOGE("While transacting, binder has been deleted at address %s. Terminating!",
465 addr.toString().c_str());
466 terminate();
467 replyStatus = BAD_VALUE;
468 } else if (target->localBinder() == nullptr) {
469 ALOGE("Transactions can only go to local binders, not address %s. Terminating!",
470 addr.toString().c_str());
471 terminate();
472 replyStatus = BAD_VALUE;
473 } else if (transaction->flags & IBinder::FLAG_ONEWAY) {
474 if (transaction->asyncNumber != it->second.asyncNumber) {
475 // we need to process some other asynchronous transaction
476 // first
477 // TODO(b/183140903): limit enqueues/detect overfill for bad client
478 // TODO(b/183140903): detect when an object is deleted when it still has
479 // pending async transactions
480 it->second.asyncTodo.push(BinderNode::AsyncTodo{
481 .data = std::move(transactionData),
482 .asyncNumber = transaction->asyncNumber,
483 });
484 LOG_RPC_DETAIL("Enqueuing %" PRId64 " on %s", transaction->asyncNumber,
485 addr.toString().c_str());
486 return OK;
487 }
488 }
489 }
490 }
491
492 Parcel data;
493 data.setData(transaction->data, transactionData.size() - offsetof(RpcWireTransaction, data));
494 data.markForRpc(connection);
495
496 Parcel reply;
497 reply.markForRpc(connection);
498
499 if (replyStatus == OK) {
500 if (target) {
501 replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
502 } else {
503 LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
504 // special case for 'zero' address (special server commands)
505 switch (transaction->code) {
506 case RPC_SPECIAL_TRANSACT_GET_ROOT: {
507 sp<IBinder> root;
508 sp<RpcServer> server = connection->server().promote();
509 if (server) {
510 root = server->getRootObject();
511 } else {
512 ALOGE("Root object requested, but no server attached.");
513 }
514
515 replyStatus = reply.writeStrongBinder(root);
516 break;
517 }
518 default: {
519 replyStatus = UNKNOWN_TRANSACTION;
520 }
521 }
522 }
523 }
524
525 if (transaction->flags & IBinder::FLAG_ONEWAY) {
526 if (replyStatus != OK) {
527 ALOGW("Oneway call failed with error: %d", replyStatus);
528 }
529
530 LOG_RPC_DETAIL("Processed async transaction %" PRId64 " on %s", transaction->asyncNumber,
531 addr.toString().c_str());
532
533 // Check to see if there is another asynchronous transaction to process.
534 // This behavior differs from binder behavior, since in the binder
535 // driver, asynchronous transactions will be processed after existing
536 // pending binder transactions on the queue. The downside of this is
537 // that asynchronous transactions can be drowned out by synchronous
538 // transactions. However, we have no easy way to queue these
539 // transactions after the synchronous transactions we may want to read
540 // from the wire. So, in socket binder here, we have the opposite
541 // downside: asynchronous transactions may drown out synchronous
542 // transactions.
543 {
544 std::unique_lock<std::mutex> _l(mNodeMutex);
545 auto it = mNodeForAddress.find(addr);
546 // last refcount dropped after this transaction happened
547 if (it == mNodeForAddress.end()) return OK;
548
549 // note - only updated now, instead of later, so that other threads
550 // will queue any later transactions
551
552 // TODO(b/183140903): support > 2**64 async transactions
553 // (we can do this by allowing asyncNumber to wrap, since we
554 // don't expect more than 2**64 simultaneous transactions)
555 it->second.asyncNumber++;
556
557 if (it->second.asyncTodo.size() == 0) return OK;
558 if (it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
559 LOG_RPC_DETAIL("Found next async transaction %" PRId64 " on %s",
560 it->second.asyncNumber, addr.toString().c_str());
561
562 // justification for const_cast (consider avoiding priority_queue):
563 // - AsyncTodo operator< doesn't depend on 'data' object
564 // - gotta go fast
565 std::vector<uint8_t> data = std::move(
566 const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top()).data);
567 it->second.asyncTodo.pop();
568 _l.unlock();
569 return processTransactInternal(fd, connection, std::move(data));
570 }
571 }
572 return OK;
573 }
574
575 RpcWireReply rpcReply{
576 .status = replyStatus,
577 };
578
579 std::vector<uint8_t> replyData(sizeof(RpcWireReply) + reply.dataSize());
580 memcpy(replyData.data() + 0, &rpcReply, sizeof(RpcWireReply));
581 memcpy(replyData.data() + sizeof(RpcWireReply), reply.data(), reply.dataSize());
582
583 if (replyData.size() > std::numeric_limits<uint32_t>::max()) {
584 ALOGE("Reply size too big %zu", transactionData.size());
585 terminate();
586 return BAD_VALUE;
587 }
588
589 RpcWireHeader cmdReply{
590 .command = RPC_COMMAND_REPLY,
591 .bodySize = static_cast<uint32_t>(replyData.size()),
592 };
593
594 if (!rpcSend(fd, "reply header", &cmdReply, sizeof(RpcWireHeader))) {
595 return DEAD_OBJECT;
596 }
597 if (!rpcSend(fd, "reply body", replyData.data(), replyData.size())) {
598 return DEAD_OBJECT;
599 }
600 return OK;
601}
602
// Handles RPC_COMMAND_DEC_STRONG: the remote side dropped one of the strong
// refs we recorded in onBinderLeaving. Decrements our bookkeeping and, on
// the last sent ref, releases sentRef outside the lock (its destructor may
// make nested binder calls on this connection).
status_t RpcState::processDecStrong(const base::unique_fd& fd, const RpcWireHeader& command) {
    LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_DEC_STRONG, "command: %d", command.command);

    std::vector<uint8_t> commandData(command.bodySize);
    if (!rpcRec(fd, "dec ref body", commandData.data(), commandData.size())) {
        return DEAD_OBJECT;
    }

    if (command.bodySize < sizeof(RpcWireAddress)) {
        ALOGE("Expecting %zu but got %" PRId32 " bytes for RpcWireAddress. Terminating!",
              sizeof(RpcWireAddress), command.bodySize);
        terminate();
        return BAD_VALUE;
    }
    RpcWireAddress* address = reinterpret_cast<RpcWireAddress*>(commandData.data());

    // TODO(b/182939933): heap allocation just for lookup
    auto addr = RpcAddress::fromRawEmbedded(address);
    std::unique_lock<std::mutex> _l(mNodeMutex);
    auto it = mNodeForAddress.find(addr);
    if (it == mNodeForAddress.end()) {
        // Unknown address: log and tolerate (a kill here would let a bad
        // client take us down).
        ALOGE("Unknown binder address %s for dec strong.", addr.toString().c_str());
        dump();
        return OK;
    }

    sp<IBinder> target = it->second.binder.promote();
    if (target == nullptr) {
        ALOGE("While requesting dec strong, binder has been deleted at address %s. Terminating!",
              addr.toString().c_str());
        terminate();
        return BAD_VALUE;
    }

    if (it->second.timesSent == 0) {
        // Remote claims a ref we never recorded sending — ignore.
        ALOGE("No record of sending binder, but requested decStrong: %s", addr.toString().c_str());
        return OK;
    }

    LOG_ALWAYS_FATAL_IF(it->second.sentRef == nullptr, "Inconsistent state, lost ref for %s",
                        addr.toString().c_str());

    sp<IBinder> tempHold;

    it->second.timesSent--;
    if (it->second.timesSent == 0) {
        // Last outstanding sent ref: stash it so it outlives the lock scope.
        tempHold = it->second.sentRef;
        it->second.sentRef = nullptr;

        if (it->second.timesRecd == 0) {
            mNodeForAddress.erase(it);
        }
    }

    _l.unlock();
    tempHold = nullptr; // destructor may make binder calls on this connection

    return OK;
}
662
663} // namespace android