libbinder: RPC disambiguate server/client

Now:
- server: RpcServer
- client: a client of an RpcServer
- incoming: a thread processing commands (either as part of an
  RpcServer's sessions, or on a client which has a threadpool)
- outgoing: a thread for sending commands (either to an RpcServer's
  sessions, or back to a client which has a threadpool)

Bug: 167966510
Test: binderRpcTest
Change-Id: Iea286ab0ff6f9fb775994247003b8d29c999e10a
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 60be406..ad377d3 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -301,7 +301,7 @@
         }
 
         if (reverse) {
-            LOG_ALWAYS_FATAL_IF(!session->addClientConnection(std::move(clientFd)),
+            LOG_ALWAYS_FATAL_IF(!session->addOutgoingConnection(std::move(clientFd)),
                                 "server state must already be initialized");
             return;
         }
@@ -350,7 +350,7 @@
     return true;
 }
 
-void RpcServer::onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& session) {
+void RpcServer::onSessionLockedAllIncomingThreadsEnded(const sp<RpcSession>& session) {
     auto id = session->mId;
     LOG_ALWAYS_FATAL_IF(id == std::nullopt, "Server sessions must be initialized with ID");
     LOG_RPC_DETAIL("Dropping session %d", *id);
@@ -362,7 +362,7 @@
     (void)mSessions.erase(it);
 }
 
-void RpcServer::onSessionServerThreadEnded() {
+void RpcServer::onSessionIncomingThreadEnded() {
     mShutdownCv.notify_all();
 }
 
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
index 4a6362a..3dbd11f 100644
--- a/libs/binder/RpcSession.cpp
+++ b/libs/binder/RpcSession.cpp
@@ -51,7 +51,7 @@
     LOG_RPC_DETAIL("RpcSession destroyed %p", this);
 
     std::lock_guard<std::mutex> _l(mMutex);
-    LOG_ALWAYS_FATAL_IF(mServerConnections.size() != 0,
+    LOG_ALWAYS_FATAL_IF(mIncomingConnections.size() != 0,
                         "Should not be able to destroy a session with servers in use.");
 }
 
@@ -61,10 +61,10 @@
 
 void RpcSession::setMaxThreads(size_t threads) {
     std::lock_guard<std::mutex> _l(mMutex);
-    LOG_ALWAYS_FATAL_IF(!mClientConnections.empty() || !mServerConnections.empty(),
+    LOG_ALWAYS_FATAL_IF(!mOutgoingConnections.empty() || !mIncomingConnections.empty(),
                         "Must set max threads before setting up connections, but has %zu client(s) "
                         "and %zu server(s)",
-                        mClientConnections.size(), mServerConnections.size());
+                        mOutgoingConnections.size(), mIncomingConnections.size());
     mMaxThreads = threads;
 }
 
@@ -100,7 +100,7 @@
         return false;
     }
 
-    return addClientConnection(std::move(serverFd));
+    return addOutgoingConnection(std::move(serverFd));
 }
 
 sp<IBinder> RpcSession::getRootObject() {
@@ -233,13 +233,13 @@
     return OK;
 }
 
-void RpcSession::WaitForShutdownListener::onSessionLockedAllServerThreadsEnded(
+void RpcSession::WaitForShutdownListener::onSessionLockedAllIncomingThreadsEnded(
         const sp<RpcSession>& session) {
     (void)session;
     mShutdown = true;
 }
 
-void RpcSession::WaitForShutdownListener::onSessionServerThreadEnded() {
+void RpcSession::WaitForShutdownListener::onSessionIncomingThreadEnded() {
     mCv.notify_all();
 }
 
@@ -263,7 +263,7 @@
 RpcSession::PreJoinSetupResult RpcSession::preJoinSetup(base::unique_fd fd) {
     // must be registered to allow arbitrary client code executing commands to
     // be able to do nested calls (we can't only read from it)
-    sp<RpcConnection> connection = assignServerToThisThread(std::move(fd));
+    sp<RpcConnection> connection = assignIncomingConnectionToThisThread(std::move(fd));
 
     status_t status = mState->readConnectionInit(connection, sp<RpcSession>::fromExisting(this));
 
@@ -291,7 +291,7 @@
               statusToString(setupResult.status).c_str());
     }
 
-    LOG_ALWAYS_FATAL_IF(!session->removeServerConnection(connection),
+    LOG_ALWAYS_FATAL_IF(!session->removeIncomingConnection(connection),
                         "bad state: connection object guaranteed to be in list");
 
     sp<RpcSession::EventListener> listener;
@@ -308,7 +308,7 @@
     session = nullptr;
 
     if (listener != nullptr) {
-        listener->onSessionServerThreadEnded();
+        listener->onSessionIncomingThreadEnded();
     }
 }
 
@@ -319,9 +319,9 @@
 bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
     {
         std::lock_guard<std::mutex> _l(mMutex);
-        LOG_ALWAYS_FATAL_IF(mClientConnections.size() != 0,
+        LOG_ALWAYS_FATAL_IF(mOutgoingConnections.size() != 0,
                             "Must only setup session once, but already has %zu clients",
-                            mClientConnections.size());
+                            mOutgoingConnections.size());
     }
 
     if (!setupOneSocketConnection(addr, RPC_SESSION_ID_NEW, false /*reverse*/)) return false;
@@ -427,7 +427,7 @@
             LOG_ALWAYS_FATAL_IF(!ownershipTransferred);
             return true;
         } else {
-            return addClientConnection(std::move(serverFd));
+            return addOutgoingConnection(std::move(serverFd));
         }
     }
 
@@ -435,7 +435,7 @@
     return false;
 }
 
-bool RpcSession::addClientConnection(unique_fd fd) {
+bool RpcSession::addOutgoingConnection(unique_fd fd) {
     sp<RpcConnection> connection = sp<RpcConnection>::make();
     {
         std::lock_guard<std::mutex> _l(mMutex);
@@ -450,7 +450,7 @@
 
         connection->fd = std::move(fd);
         connection->exclusiveTid = gettid();
-        mClientConnections.push_back(connection);
+        mOutgoingConnections.push_back(connection);
     }
 
     status_t status = mState->sendConnectionInit(connection, sp<RpcSession>::fromExisting(this));
@@ -480,25 +480,26 @@
     return true;
 }
 
-sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
+sp<RpcSession::RpcConnection> RpcSession::assignIncomingConnectionToThisThread(unique_fd fd) {
     std::lock_guard<std::mutex> _l(mMutex);
     sp<RpcConnection> session = sp<RpcConnection>::make();
     session->fd = std::move(fd);
     session->exclusiveTid = gettid();
-    mServerConnections.push_back(session);
+    mIncomingConnections.push_back(session);
 
     return session;
 }
 
-bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
+bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
     std::lock_guard<std::mutex> _l(mMutex);
-    if (auto it = std::find(mServerConnections.begin(), mServerConnections.end(), connection);
-        it != mServerConnections.end()) {
-        mServerConnections.erase(it);
-        if (mServerConnections.size() == 0) {
+    if (auto it = std::find(mIncomingConnections.begin(), mIncomingConnections.end(), connection);
+        it != mIncomingConnections.end()) {
+        mIncomingConnections.erase(it);
+        if (mIncomingConnections.size() == 0) {
             sp<EventListener> listener = mEventListener.promote();
             if (listener) {
-                listener->onSessionLockedAllServerThreadsEnded(sp<RpcSession>::fromExisting(this));
+                listener->onSessionLockedAllIncomingThreadsEnded(
+                        sp<RpcSession>::fromExisting(this));
             }
         }
         return true;
@@ -523,11 +524,11 @@
         // CHECK FOR DEDICATED CLIENT SOCKET
         //
         // A server/looper should always use a dedicated connection if available
-        findConnection(tid, &exclusive, &available, session->mClientConnections,
-                       session->mClientConnectionsOffset);
+        findConnection(tid, &exclusive, &available, session->mOutgoingConnections,
+                       session->mOutgoingConnectionsOffset);
 
         // WARNING: this assumes a server cannot request its client to send
-        // a transaction, as mServerConnections is excluded below.
+        // a transaction, as mIncomingConnections is excluded below.
         //
         // Imagine we have more than one thread in play, and a single thread
         // sends a synchronous, then an asynchronous command. Imagine the
@@ -537,29 +538,29 @@
         // command. So, we move to considering the second available thread
         // for subsequent calls.
         if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
-            session->mClientConnectionsOffset =
-                    (session->mClientConnectionsOffset + 1) % session->mClientConnections.size();
+            session->mOutgoingConnectionsOffset = (session->mOutgoingConnectionsOffset + 1) %
+                    session->mOutgoingConnections.size();
         }
 
         // USE SERVING SOCKET (e.g. nested transaction)
         if (use != ConnectionUse::CLIENT_ASYNC) {
-            sp<RpcConnection> exclusiveServer;
+            sp<RpcConnection> exclusiveIncoming;
             // server connections are always assigned to a thread
-            findConnection(tid, &exclusiveServer, nullptr /*available*/,
-                           session->mServerConnections, 0 /* index hint */);
+            findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
+                           session->mIncomingConnections, 0 /* index hint */);
 
             // asynchronous calls cannot be nested, we currently allow ref count
             // calls to be nested (so that you can use this without having extra
             // threads). Note 'drainCommands' is used so that these ref counts can't
             // build up.
-            if (exclusiveServer != nullptr) {
-                if (exclusiveServer->allowNested) {
+            if (exclusiveIncoming != nullptr) {
+                if (exclusiveIncoming->allowNested) {
                     // guaranteed to be processed as nested command
-                    exclusive = exclusiveServer;
+                    exclusive = exclusiveIncoming;
                 } else if (use == ConnectionUse::CLIENT_REFCOUNT && available == nullptr) {
                     // prefer available socket, but if we don't have one, don't
                     // wait for one
-                    exclusive = exclusiveServer;
+                    exclusive = exclusiveIncoming;
                 }
             }
         }
@@ -575,16 +576,16 @@
             break;
         }
 
-        if (session->mClientConnections.size() == 0) {
+        if (session->mOutgoingConnections.size() == 0) {
             ALOGE("Session has no client connections. This is required for an RPC server to make "
                   "any non-nested (e.g. oneway or on another thread) calls. Use: %d. Server "
                   "connections: %zu",
-                  static_cast<int>(use), session->mServerConnections.size());
+                  static_cast<int>(use), session->mIncomingConnections.size());
             return WOULD_BLOCK;
         }
 
         LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
-                       session->mClientConnections.size(), session->mServerConnections.size());
+                       session->mOutgoingConnections.size(), session->mIncomingConnections.size());
         session->mAvailableConnectionCv.wait(_l);
     }
     session->mWaitingThreads--;
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 050f4fb..8dd6daf 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -272,7 +272,7 @@
 
 status_t RpcState::sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                       const sp<RpcSession>& session) {
-    RpcClientConnectionInit init{
+    RpcOutgoingConnectionInit init{
             .msg = RPC_CONNECTION_INIT_OKAY,
     };
     return rpcSend(connection, session, "connection init", &init, sizeof(init));
@@ -280,7 +280,7 @@
 
 status_t RpcState::readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                       const sp<RpcSession>& session) {
-    RpcClientConnectionInit init;
+    RpcOutgoingConnectionInit init;
     if (status_t status = rpcRec(connection, session, "connection init", &init, sizeof(init));
         status != OK)
         return status;
@@ -470,7 +470,7 @@
 
         if (command.command == RPC_COMMAND_REPLY) break;
 
-        if (status_t status = processServerCommand(connection, session, command, CommandType::ANY);
+        if (status_t status = processCommand(connection, session, command, CommandType::ANY);
             status != OK)
             return status;
     }
@@ -539,7 +539,7 @@
         status != OK)
         return status;
 
-    return processServerCommand(connection, session, command, type);
+    return processCommand(connection, session, command, type);
 }
 
 status_t RpcState::drainCommands(const sp<RpcSession::RpcConnection>& connection,
@@ -553,9 +553,9 @@
     return OK;
 }
 
-status_t RpcState::processServerCommand(const sp<RpcSession::RpcConnection>& connection,
-                                        const sp<RpcSession>& session, const RpcWireHeader& command,
-                                        CommandType type) {
+status_t RpcState::processCommand(const sp<RpcSession::RpcConnection>& connection,
+                                  const sp<RpcSession>& session, const RpcWireHeader& command,
+                                  CommandType type) {
     IPCThreadState* kernelBinderState = IPCThreadState::selfOrNull();
     IPCThreadState::SpGuard spGuard{
             .address = __builtin_frame_address(0),
diff --git a/libs/binder/RpcState.h b/libs/binder/RpcState.h
index 0dcbb22..d306595 100644
--- a/libs/binder/RpcState.h
+++ b/libs/binder/RpcState.h
@@ -145,9 +145,9 @@
 
     [[nodiscard]] status_t waitForReply(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, Parcel* reply);
-    [[nodiscard]] status_t processServerCommand(const sp<RpcSession::RpcConnection>& connection,
-                                                const sp<RpcSession>& session,
-                                                const RpcWireHeader& command, CommandType type);
+    [[nodiscard]] status_t processCommand(const sp<RpcSession::RpcConnection>& connection,
+                                          const sp<RpcSession>& session,
+                                          const RpcWireHeader& command, CommandType type);
     [[nodiscard]] status_t processTransact(const sp<RpcSession::RpcConnection>& connection,
                                            const sp<RpcSession>& session,
                                            const RpcWireHeader& command);
diff --git a/libs/binder/RpcWireFormat.h b/libs/binder/RpcWireFormat.h
index b5e5bc1..92da856 100644
--- a/libs/binder/RpcWireFormat.h
+++ b/libs/binder/RpcWireFormat.h
@@ -43,7 +43,7 @@
  * transaction. The main use of this is in order to control the timing for when
  * a reverse connection is setup.
  */
-struct RpcClientConnectionInit {
+struct RpcOutgoingConnectionInit {
     char msg[4];
     uint8_t reserved[4];
 };
diff --git a/libs/binder/include/binder/RpcServer.h b/libs/binder/include/binder/RpcServer.h
index 4e6934b..fdcb3a8 100644
--- a/libs/binder/include/binder/RpcServer.h
+++ b/libs/binder/include/binder/RpcServer.h
@@ -155,8 +155,8 @@
     friend sp<RpcServer>;
     RpcServer();
 
-    void onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& session) override;
-    void onSessionServerThreadEnded() override;
+    void onSessionLockedAllIncomingThreadsEnded(const sp<RpcSession>& session) override;
+    void onSessionIncomingThreadEnded() override;
 
     static void establishConnection(sp<RpcServer>&& server, base::unique_fd clientFd);
     bool setupSocketServer(const RpcSocketAddress& address);
diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h
index e40154b..eaa86dd 100644
--- a/libs/binder/include/binder/RpcSession.h
+++ b/libs/binder/include/binder/RpcSession.h
@@ -170,14 +170,14 @@
 
     class EventListener : public virtual RefBase {
     public:
-        virtual void onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& session) = 0;
-        virtual void onSessionServerThreadEnded() = 0;
+        virtual void onSessionLockedAllIncomingThreadsEnded(const sp<RpcSession>& session) = 0;
+        virtual void onSessionIncomingThreadEnded() = 0;
     };
 
     class WaitForShutdownListener : public EventListener {
     public:
-        void onSessionLockedAllServerThreadsEnded(const sp<RpcSession>& session) override;
-        void onSessionServerThreadEnded() override;
+        void onSessionLockedAllIncomingThreadsEnded(const sp<RpcSession>& session) override;
+        void onSessionIncomingThreadEnded() override;
         void waitForShutdown(std::unique_lock<std::mutex>& lock);
 
     private:
@@ -219,12 +219,12 @@
     [[nodiscard]] bool setupSocketClient(const RpcSocketAddress& address);
     [[nodiscard]] bool setupOneSocketConnection(const RpcSocketAddress& address, int32_t sessionId,
                                                 bool server);
-    [[nodiscard]] bool addClientConnection(base::unique_fd fd);
+    [[nodiscard]] bool addOutgoingConnection(base::unique_fd fd);
     [[nodiscard]] bool setForServer(const wp<RpcServer>& server,
                                     const wp<RpcSession::EventListener>& eventListener,
                                     int32_t sessionId);
-    sp<RpcConnection> assignServerToThisThread(base::unique_fd fd);
-    [[nodiscard]] bool removeServerConnection(const sp<RpcConnection>& connection);
+    sp<RpcConnection> assignIncomingConnectionToThisThread(base::unique_fd fd);
+    [[nodiscard]] bool removeIncomingConnection(const sp<RpcConnection>& connection);
 
     enum class ConnectionUse {
         CLIENT,
@@ -256,13 +256,13 @@
         bool mReentrant = false;
     };
 
-    // On the other side of a session, for each of mClientConnections here, there should
-    // be one of mServerConnections on the other side (and vice versa).
+    // On the other side of a session, for each of mOutgoingConnections here, there should
+    // be one of mIncomingConnections on the other side (and vice versa).
     //
     // For the simplest session, a single server with one client, you would
     // have:
-    //  - the server has a single 'mServerConnections' and a thread listening on this
-    //  - the client has a single 'mClientConnections' and makes calls to this
+    //  - the server has a single 'mIncomingConnections' and a thread listening on this
+    //  - the client has a single 'mOutgoingConnections' and makes calls to this
     //  - here, when the client makes a call, the server can call back into it
     //    (nested calls), but outside of this, the client will only ever read
     //    calls from the server when it makes a call itself.
@@ -288,9 +288,9 @@
     std::condition_variable mAvailableConnectionCv; // for mWaitingThreads
     size_t mWaitingThreads = 0;
     // hint index into clients, ++ when sending an async transaction
-    size_t mClientConnectionsOffset = 0;
-    std::vector<sp<RpcConnection>> mClientConnections;
-    std::vector<sp<RpcConnection>> mServerConnections;
+    size_t mOutgoingConnectionsOffset = 0;
+    std::vector<sp<RpcConnection>> mOutgoingConnections;
+    std::vector<sp<RpcConnection>> mIncomingConnections;
     std::map<std::thread::id, std::thread> mThreads;
 };