libbinder: RPC big 'session' rename

Before, 'connection' was used for a group of TCP connections. This
updates the names of most types to match the following definitions:

- session - a group of connections
- connection - a single TCP connection
- socket - only when referring to a socket being set up for a connection

Bug: N/A
Fixes: 187393805 # conflict
Test: binderRpcTest, binderRpcBenchmark
Change-Id: If07afd9af680cd2a5ece6506df5383e5cc258663
(cherry picked from commit bdb53ab39d08f9d189706532d347f575cbdac257)
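
For illustration only (not part of this patch), a minimal client-side
sketch under the new names might look like the following. The socket
path is hypothetical and error handling is elided:

    #include <binder/IBinder.h>
    #include <binder/RpcSession.h>

    using android::IBinder;
    using android::RpcSession;
    using android::sp;

    sp<IBinder> connectToRpcServer() {
        // One session per server; it internally sets up a group of
        // connections (see RpcSession::setupSocketClient below).
        sp<RpcSession> session = RpcSession::make();
        if (!session->setupUnixDomainClient("/tmp/some_rpc_service")) {
            return nullptr;
        }
        // The root IBinder exposed by the server, if any.
        return session->getRootObject();
    }
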
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index a97cf87..be260e8 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -112,7 +112,7 @@
         "PersistableBundle.cpp",
         "ProcessState.cpp",
         "RpcAddress.cpp",
-        "RpcConnection.cpp",
+        "RpcSession.cpp",
         "RpcServer.cpp",
         "RpcState.cpp",
         "Static.cpp",
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index fdcf94a..1dcb94c 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -21,7 +21,7 @@
 
 #include <binder/IPCThreadState.h>
 #include <binder/IResultReceiver.h>
-#include <binder/RpcConnection.h>
+#include <binder/RpcSession.h>
 #include <binder/Stability.h>
 #include <cutils/compiler.h>
 #include <utils/Log.h>
@@ -136,15 +136,15 @@
     return sp<BpBinder>::make(BinderHandle{handle}, trackedUid);
 }
 
-sp<BpBinder> BpBinder::create(const sp<RpcConnection>& connection, const RpcAddress& address) {
-    LOG_ALWAYS_FATAL_IF(connection == nullptr, "BpBinder::create null connection");
+sp<BpBinder> BpBinder::create(const sp<RpcSession>& session, const RpcAddress& address) {
+    LOG_ALWAYS_FATAL_IF(session == nullptr, "BpBinder::create null session");
 
     // These are not currently tracked, since there is no UID or other
     // identifier to track them with. However, if similar functionality is
-    // needed, connection objects keep track of all BpBinder objects on a
-    // per-connection basis.
+    // needed, session objects keep track of all BpBinder objects on a
+    // per-session basis.
 
-    return sp<BpBinder>::make(SocketHandle{connection, address});
+    return sp<BpBinder>::make(RpcHandle{session, address});
 }
 
 BpBinder::BpBinder(Handle&& handle)
@@ -165,20 +165,20 @@
     IPCThreadState::self()->incWeakHandle(this->binderHandle(), this);
 }
 
-BpBinder::BpBinder(SocketHandle&& handle) : BpBinder(Handle(handle)) {
-    LOG_ALWAYS_FATAL_IF(rpcConnection() == nullptr, "BpBinder created w/o connection object");
+BpBinder::BpBinder(RpcHandle&& handle) : BpBinder(Handle(handle)) {
+    LOG_ALWAYS_FATAL_IF(rpcSession() == nullptr, "BpBinder created w/o session object");
 }
 
 bool BpBinder::isRpcBinder() const {
-    return std::holds_alternative<SocketHandle>(mHandle);
+    return std::holds_alternative<RpcHandle>(mHandle);
 }
 
 const RpcAddress& BpBinder::rpcAddress() const {
-    return std::get<SocketHandle>(mHandle).address;
+    return std::get<RpcHandle>(mHandle).address;
 }
 
-const sp<RpcConnection>& BpBinder::rpcConnection() const {
-    return std::get<SocketHandle>(mHandle).connection;
+const sp<RpcSession>& BpBinder::rpcSession() const {
+    return std::get<RpcHandle>(mHandle).session;
 }
 
 int32_t BpBinder::binderHandle() const {
@@ -273,7 +273,7 @@
 
         status_t status;
         if (CC_UNLIKELY(isRpcBinder())) {
-            status = rpcConnection()->transact(rpcAddress(), code, data, reply, flags);
+            status = rpcSession()->transact(rpcAddress(), code, data, reply, flags);
         } else {
             status = IPCThreadState::self()->transact(binderHandle(), code, data, reply, flags);
         }
@@ -479,7 +479,7 @@
 {
     ALOGV("onLastStrongRef BpBinder %p handle %d\n", this, binderHandle());
     if (CC_UNLIKELY(isRpcBinder())) {
-        (void)rpcConnection()->sendDecStrong(rpcAddress());
+        (void)rpcSession()->sendDecStrong(rpcAddress());
         return;
     }
     IF_ALOGV() {
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index 5627a78..ee834ea 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -202,7 +202,7 @@
             status_t status = writeInt32(1); // non-null
             if (status != OK) return status;
             RpcAddress address = RpcAddress::zero();
-            status = mConnection->state()->onBinderLeaving(mConnection, binder, &address);
+            status = mSession->state()->onBinderLeaving(mSession, binder, &address);
             if (status != OK) return status;
             status = address.writeToParcel(this);
             if (status != OK) return status;
@@ -273,8 +273,7 @@
 status_t Parcel::unflattenBinder(sp<IBinder>* out) const
 {
     if (isForRpc()) {
-        LOG_ALWAYS_FATAL_IF(mConnection == nullptr,
-                            "RpcConnection required to read from remote parcel");
+        LOG_ALWAYS_FATAL_IF(mSession == nullptr, "RpcSession required to read from remote parcel");
 
         int32_t isNull;
         status_t status = readInt32(&isNull);
@@ -286,7 +285,7 @@
             auto addr = RpcAddress::zero();
             status_t status = addr.readFromParcel(*this);
             if (status != OK) return status;
-            binder = mConnection->state()->onBinderEntering(mConnection, addr);
+            binder = mSession->state()->onBinderEntering(mSession, addr);
         }
 
         return finishUnflattenBinder(binder, out);
@@ -568,20 +567,20 @@
     LOG_ALWAYS_FATAL_IF(mData != nullptr, "format must be set before data is written");
 
     if (binder && binder->remoteBinder() && binder->remoteBinder()->isRpcBinder()) {
-        markForRpc(binder->remoteBinder()->getPrivateAccessorForId().rpcConnection());
+        markForRpc(binder->remoteBinder()->getPrivateAccessorForId().rpcSession());
     }
 }
 
-void Parcel::markForRpc(const sp<RpcConnection>& connection) {
+void Parcel::markForRpc(const sp<RpcSession>& session) {
     LOG_ALWAYS_FATAL_IF(mData != nullptr && mOwner == nullptr,
                         "format must be set before data is written OR on IPC data");
 
-    LOG_ALWAYS_FATAL_IF(connection == nullptr, "markForRpc requires connection");
-    mConnection = connection;
+    LOG_ALWAYS_FATAL_IF(session == nullptr, "markForRpc requires session");
+    mSession = session;
 }
 
 bool Parcel::isForRpc() const {
-    return mConnection != nullptr;
+    return mSession != nullptr;
 }
 
 void Parcel::updateWorkSourceRequestHeaderPosition() const {
@@ -2499,7 +2498,7 @@
     mDataPos = 0;
     ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
     ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
-    mConnection = nullptr;
+    mSession = nullptr;
     mObjects = nullptr;
     mObjectsSize = 0;
     mObjectsCapacity = 0;
diff --git a/libs/binder/RpcConnection.cpp b/libs/binder/RpcConnection.cpp
deleted file mode 100644
index 4b3a53f..0000000
--- a/libs/binder/RpcConnection.cpp
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "RpcConnection"
-
-#include <binder/RpcConnection.h>
-
-#include <inttypes.h>
-#include <unistd.h>
-
-#include <string_view>
-
-#include <binder/Parcel.h>
-#include <binder/Stability.h>
-#include <utils/String8.h>
-
-#include "RpcSocketAddress.h"
-#include "RpcState.h"
-#include "RpcWireFormat.h"
-
-#ifdef __GLIBC__
-extern "C" pid_t gettid();
-#endif
-
-namespace android {
-
-using base::unique_fd;
-
-RpcConnection::RpcConnection() {
-    LOG_RPC_DETAIL("RpcConnection created %p", this);
-
-    mState = std::make_unique<RpcState>();
-}
-RpcConnection::~RpcConnection() {
-    LOG_RPC_DETAIL("RpcConnection destroyed %p", this);
-
-    std::lock_guard<std::mutex> _l(mSocketMutex);
-    LOG_ALWAYS_FATAL_IF(mServers.size() != 0,
-                        "Should not be able to destroy a connection with servers in use.");
-}
-
-sp<RpcConnection> RpcConnection::make() {
-    return sp<RpcConnection>::make();
-}
-
-bool RpcConnection::setupUnixDomainClient(const char* path) {
-    return setupSocketClient(UnixSocketAddress(path));
-}
-
-#ifdef __BIONIC__
-
-bool RpcConnection::setupVsockClient(unsigned int cid, unsigned int port) {
-    return setupSocketClient(VsockSocketAddress(cid, port));
-}
-
-#endif // __BIONIC__
-
-bool RpcConnection::setupInetClient(const char* addr, unsigned int port) {
-    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
-    if (aiStart == nullptr) return false;
-    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
-        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
-        if (setupSocketClient(socketAddress)) return true;
-    }
-    ALOGE("None of the socket address resolved for %s:%u can be added as inet client.", addr, port);
-    return false;
-}
-
-bool RpcConnection::addNullDebuggingClient() {
-    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));
-
-    if (serverFd == -1) {
-        ALOGE("Could not connect to /dev/null: %s", strerror(errno));
-        return false;
-    }
-
-    addClient(std::move(serverFd));
-    return true;
-}
-
-sp<IBinder> RpcConnection::getRootObject() {
-    ExclusiveSocket socket(sp<RpcConnection>::fromExisting(this), SocketUse::CLIENT);
-    return state()->getRootObject(socket.fd(), sp<RpcConnection>::fromExisting(this));
-}
-
-status_t RpcConnection::getMaxThreads(size_t* maxThreads) {
-    ExclusiveSocket socket(sp<RpcConnection>::fromExisting(this), SocketUse::CLIENT);
-    return state()->getMaxThreads(socket.fd(), sp<RpcConnection>::fromExisting(this), maxThreads);
-}
-
-status_t RpcConnection::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
-                                 Parcel* reply, uint32_t flags) {
-    ExclusiveSocket socket(sp<RpcConnection>::fromExisting(this),
-                           (flags & IBinder::FLAG_ONEWAY) ? SocketUse::CLIENT_ASYNC
-                                                          : SocketUse::CLIENT);
-    return state()->transact(socket.fd(), address, code, data,
-                             sp<RpcConnection>::fromExisting(this), reply, flags);
-}
-
-status_t RpcConnection::sendDecStrong(const RpcAddress& address) {
-    ExclusiveSocket socket(sp<RpcConnection>::fromExisting(this), SocketUse::CLIENT_REFCOUNT);
-    return state()->sendDecStrong(socket.fd(), address);
-}
-
-status_t RpcConnection::readId() {
-    {
-        std::lock_guard<std::mutex> _l(mSocketMutex);
-        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
-    }
-
-    int32_t id;
-
-    ExclusiveSocket socket(sp<RpcConnection>::fromExisting(this), SocketUse::CLIENT);
-    status_t status =
-            state()->getConnectionId(socket.fd(), sp<RpcConnection>::fromExisting(this), &id);
-    if (status != OK) return status;
-
-    LOG_RPC_DETAIL("RpcConnection %p has id %d", this, id);
-    mId = id;
-    return OK;
-}
-
-void RpcConnection::startThread(unique_fd client) {
-    std::lock_guard<std::mutex> _l(mSocketMutex);
-    sp<RpcConnection> holdThis = sp<RpcConnection>::fromExisting(this);
-    int fd = client.release();
-    auto thread = std::thread([=] {
-        holdThis->join(unique_fd(fd));
-        {
-            std::lock_guard<std::mutex> _l(holdThis->mSocketMutex);
-            size_t erased = mThreads.erase(std::this_thread::get_id());
-            LOG_ALWAYS_FATAL_IF(erased != 0, "Could not erase thread.");
-        }
-    });
-    mThreads[thread.get_id()] = std::move(thread);
-}
-
-void RpcConnection::join(unique_fd client) {
-    // must be registered to allow arbitrary client code executing commands to
-    // be able to do nested calls (we can't only read from it)
-    sp<ConnectionSocket> socket = assignServerToThisThread(std::move(client));
-
-    while (true) {
-        status_t error =
-                state()->getAndExecuteCommand(socket->fd, sp<RpcConnection>::fromExisting(this));
-
-        if (error != OK) {
-            ALOGI("Binder socket thread closing w/ status %s", statusToString(error).c_str());
-            break;
-        }
-    }
-
-    LOG_ALWAYS_FATAL_IF(!removeServerSocket(socket),
-                        "bad state: socket object guaranteed to be in list");
-}
-
-wp<RpcServer> RpcConnection::server() {
-    return mForServer;
-}
-
-bool RpcConnection::setupSocketClient(const RpcSocketAddress& addr) {
-    {
-        std::lock_guard<std::mutex> _l(mSocketMutex);
-        LOG_ALWAYS_FATAL_IF(mClients.size() != 0,
-                            "Must only setup connection once, but already has %zu clients",
-                            mClients.size());
-    }
-
-    if (!setupOneSocketClient(addr, RPC_CONNECTION_ID_NEW)) return false;
-
-    // TODO(b/185167543): we should add additional connections dynamically
-    // instead of all at once.
-    // TODO(b/186470974): first risk of blocking
-    size_t numThreadsAvailable;
-    if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
-        ALOGE("Could not get max threads after initial connection to %s: %s",
-              addr.toString().c_str(), statusToString(status).c_str());
-        return false;
-    }
-
-    if (status_t status = readId(); status != OK) {
-        ALOGE("Could not get connection id after initial connection to %s; %s",
-              addr.toString().c_str(), statusToString(status).c_str());
-        return false;
-    }
-
-    // we've already setup one client
-    for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
-        // TODO(b/185167543): avoid race w/ accept4 not being called on server
-        for (size_t tries = 0; tries < 5; tries++) {
-            if (setupOneSocketClient(addr, mId.value())) break;
-            usleep(10000);
-        }
-    }
-
-    return true;
-}
-
-bool RpcConnection::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
-    unique_fd serverFd(
-            TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
-    if (serverFd == -1) {
-        int savedErrno = errno;
-        ALOGE("Could not create socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
-        return false;
-    }
-
-    if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
-        int savedErrno = errno;
-        ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
-        return false;
-    }
-
-    if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
-        int savedErrno = errno;
-        ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
-              strerror(savedErrno));
-        return false;
-    }
-
-    LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());
-
-    addClient(std::move(serverFd));
-    return true;
-}
-
-void RpcConnection::addClient(unique_fd fd) {
-    std::lock_guard<std::mutex> _l(mSocketMutex);
-    sp<ConnectionSocket> connection = sp<ConnectionSocket>::make();
-    connection->fd = std::move(fd);
-    mClients.push_back(connection);
-}
-
-void RpcConnection::setForServer(const wp<RpcServer>& server, int32_t connectionId) {
-    mId = connectionId;
-    mForServer = server;
-}
-
-sp<RpcConnection::ConnectionSocket> RpcConnection::assignServerToThisThread(unique_fd fd) {
-    std::lock_guard<std::mutex> _l(mSocketMutex);
-    sp<ConnectionSocket> connection = sp<ConnectionSocket>::make();
-    connection->fd = std::move(fd);
-    connection->exclusiveTid = gettid();
-    mServers.push_back(connection);
-
-    return connection;
-}
-
-bool RpcConnection::removeServerSocket(const sp<ConnectionSocket>& socket) {
-    std::lock_guard<std::mutex> _l(mSocketMutex);
-    if (auto it = std::find(mServers.begin(), mServers.end(), socket); it != mServers.end()) {
-        mServers.erase(it);
-        return true;
-    }
-    return false;
-}
-
-RpcConnection::ExclusiveSocket::ExclusiveSocket(const sp<RpcConnection>& connection, SocketUse use)
-      : mConnection(connection) {
-    pid_t tid = gettid();
-    std::unique_lock<std::mutex> _l(mConnection->mSocketMutex);
-
-    mConnection->mWaitingThreads++;
-    while (true) {
-        sp<ConnectionSocket> exclusive;
-        sp<ConnectionSocket> available;
-
-        // CHECK FOR DEDICATED CLIENT SOCKET
-        //
-        // A server/looper should always use a dedicated connection if available
-        findSocket(tid, &exclusive, &available, mConnection->mClients, mConnection->mClientsOffset);
-
-        // WARNING: this assumes a server cannot request its client to send
-        // a transaction, as mServers is excluded below.
-        //
-        // Imagine we have more than one thread in play, and a single thread
-        // sends a synchronous, then an asynchronous command. Imagine the
-        // asynchronous command is sent on the first client socket. Then, if
-        // we naively send a synchronous command to that same socket, the
-        // thread on the far side might be busy processing the asynchronous
-        // command. So, we move to considering the second available thread
-        // for subsequent calls.
-        if (use == SocketUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
-            mConnection->mClientsOffset =
-                    (mConnection->mClientsOffset + 1) % mConnection->mClients.size();
-        }
-
-        // USE SERVING SOCKET (for nested transaction)
-        //
-        // asynchronous calls cannot be nested
-        if (use != SocketUse::CLIENT_ASYNC) {
-            // server sockets are always assigned to a thread
-            findSocket(tid, &exclusive, nullptr /*available*/, mConnection->mServers,
-                       0 /* index hint */);
-        }
-
-        // if our thread is already using a connection, prioritize using that
-        if (exclusive != nullptr) {
-            mSocket = exclusive;
-            mReentrant = true;
-            break;
-        } else if (available != nullptr) {
-            mSocket = available;
-            mSocket->exclusiveTid = tid;
-            break;
-        }
-
-        // in regular binder, this would usually be a deadlock :)
-        LOG_ALWAYS_FATAL_IF(mConnection->mClients.size() == 0,
-                            "Not a client of any connection. You must create a connection to an "
-                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
-                            "calls.");
-
-        LOG_RPC_DETAIL("No available connection (have %zu clients and %zu servers). Waiting...",
-                       mConnection->mClients.size(), mConnection->mServers.size());
-        mConnection->mSocketCv.wait(_l);
-    }
-    mConnection->mWaitingThreads--;
-}
-
-void RpcConnection::ExclusiveSocket::findSocket(pid_t tid, sp<ConnectionSocket>* exclusive,
-                                                sp<ConnectionSocket>* available,
-                                                std::vector<sp<ConnectionSocket>>& sockets,
-                                                size_t socketsIndexHint) {
-    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
-                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());
-
-    if (*exclusive != nullptr) return; // consistent with break below
-
-    for (size_t i = 0; i < sockets.size(); i++) {
-        sp<ConnectionSocket>& socket = sockets[(i + socketsIndexHint) % sockets.size()];
-
-        // take first available connection (intuition = caching)
-        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
-            *available = socket;
-            continue;
-        }
-
-        // though, prefer to take connection which is already inuse by this thread
-        // (nested transactions)
-        if (exclusive && socket->exclusiveTid == tid) {
-            *exclusive = socket;
-            break; // consistent with return above
-        }
-    }
-}
-
-RpcConnection::ExclusiveSocket::~ExclusiveSocket() {
-    // reentrant use of a connection means something less deep in the call stack
-    // is using this fd, and it retains the right to it. So, we don't give up
-    // exclusive ownership, and no thread is freed.
-    if (!mReentrant) {
-        std::unique_lock<std::mutex> _l(mConnection->mSocketMutex);
-        mSocket->exclusiveTid = std::nullopt;
-        if (mConnection->mWaitingThreads > 0) {
-            _l.unlock();
-            mConnection->mSocketCv.notify_one();
-        }
-    }
-}
-
-} // namespace android
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index de7160e..786e2db 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -149,38 +149,38 @@
         {
             std::lock_guard<std::mutex> _l(mLock);
 
-            sp<RpcConnection> connection;
-            if (id == RPC_CONNECTION_ID_NEW) {
+            sp<RpcSession> session;
+            if (id == RPC_SESSION_ID_NEW) {
                 // new client!
-                LOG_ALWAYS_FATAL_IF(mConnectionIdCounter >= INT32_MAX, "Out of connection IDs");
-                mConnectionIdCounter++;
+                LOG_ALWAYS_FATAL_IF(mSessionIdCounter >= INT32_MAX, "Out of session IDs");
+                mSessionIdCounter++;
 
-                connection = RpcConnection::make();
-                connection->setForServer(wp<RpcServer>::fromExisting(this), mConnectionIdCounter);
+                session = RpcSession::make();
+                session->setForServer(wp<RpcServer>::fromExisting(this), mSessionIdCounter);
 
-                mConnections[mConnectionIdCounter] = connection;
+                mSessions[mSessionIdCounter] = session;
             } else {
-                auto it = mConnections.find(id);
-                if (it == mConnections.end()) {
-                    ALOGE("Cannot add thread, no record of connection with ID %d", id);
+                auto it = mSessions.find(id);
+                if (it == mSessions.end()) {
+                    ALOGE("Cannot add thread, no record of session with ID %d", id);
                     continue;
                 }
-                connection = it->second;
+                session = it->second;
             }
 
-            connection->startThread(std::move(clientFd));
+            session->startThread(std::move(clientFd));
         }
     }
 }
 
-std::vector<sp<RpcConnection>> RpcServer::listConnections() {
+std::vector<sp<RpcSession>> RpcServer::listSessions() {
     std::lock_guard<std::mutex> _l(mLock);
-    std::vector<sp<RpcConnection>> connections;
-    for (auto& [id, connection] : mConnections) {
+    std::vector<sp<RpcSession>> sessions;
+    for (auto& [id, session] : mSessions) {
         (void)id;
-        connections.push_back(connection);
+        sessions.push_back(session);
     }
-    return connections;
+    return sessions;
 }
 
 bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) {
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
new file mode 100644
index 0000000..09ec20d
--- /dev/null
+++ b/libs/binder/RpcSession.cpp
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RpcSession"
+
+#include <binder/RpcSession.h>
+
+#include <inttypes.h>
+#include <unistd.h>
+
+#include <string_view>
+
+#include <binder/Parcel.h>
+#include <binder/Stability.h>
+#include <utils/String8.h>
+
+#include "RpcSocketAddress.h"
+#include "RpcState.h"
+#include "RpcWireFormat.h"
+
+#ifdef __GLIBC__
+extern "C" pid_t gettid();
+#endif
+
+namespace android {
+
+using base::unique_fd;
+
+RpcSession::RpcSession() {
+    LOG_RPC_DETAIL("RpcSession created %p", this);
+
+    mState = std::make_unique<RpcState>();
+}
+RpcSession::~RpcSession() {
+    LOG_RPC_DETAIL("RpcSession destroyed %p", this);
+
+    std::lock_guard<std::mutex> _l(mMutex);
+    LOG_ALWAYS_FATAL_IF(mServers.size() != 0,
+                        "Should not be able to destroy a session with servers in use.");
+}
+
+sp<RpcSession> RpcSession::make() {
+    return sp<RpcSession>::make();
+}
+
+bool RpcSession::setupUnixDomainClient(const char* path) {
+    return setupSocketClient(UnixSocketAddress(path));
+}
+
+#ifdef __BIONIC__
+
+bool RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
+    return setupSocketClient(VsockSocketAddress(cid, port));
+}
+
+#endif // __BIONIC__
+
+bool RpcSession::setupInetClient(const char* addr, unsigned int port) {
+    auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
+    if (aiStart == nullptr) return false;
+    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
+        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
+        if (setupSocketClient(socketAddress)) return true;
+    }
+    ALOGE("None of the socket address resolved for %s:%u can be added as inet client.", addr, port);
+    return false;
+}
+
+bool RpcSession::addNullDebuggingClient() {
+    unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));
+
+    if (serverFd == -1) {
+        ALOGE("Could not connect to /dev/null: %s", strerror(errno));
+        return false;
+    }
+
+    addClient(std::move(serverFd));
+    return true;
+}
+
+sp<IBinder> RpcSession::getRootObject() {
+    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
+    return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
+}
+
+status_t RpcSession::getMaxThreads(size_t* maxThreads) {
+    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
+    return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
+}
+
+status_t RpcSession::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
+                              Parcel* reply, uint32_t flags) {
+    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
+                                   (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
+                                                                  : ConnectionUse::CLIENT);
+    return state()->transact(connection.fd(), address, code, data,
+                             sp<RpcSession>::fromExisting(this), reply, flags);
+}
+
+status_t RpcSession::sendDecStrong(const RpcAddress& address) {
+    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
+                                   ConnectionUse::CLIENT_REFCOUNT);
+    return state()->sendDecStrong(connection.fd(), address);
+}
+
+status_t RpcSession::readId() {
+    {
+        std::lock_guard<std::mutex> _l(mMutex);
+        LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
+    }
+
+    int32_t id;
+
+    ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
+    status_t status =
+            state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
+    if (status != OK) return status;
+
+    LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
+    mId = id;
+    return OK;
+}
+
+void RpcSession::startThread(unique_fd client) {
+    std::lock_guard<std::mutex> _l(mMutex);
+    sp<RpcSession> holdThis = sp<RpcSession>::fromExisting(this);
+    int fd = client.release();
+    auto thread = std::thread([=] {
+        holdThis->join(unique_fd(fd));
+        {
+            std::lock_guard<std::mutex> _l(holdThis->mMutex);
+            // detach before erase; destroying a joinable std::thread aborts
+            auto it = mThreads.find(std::this_thread::get_id());
+            LOG_ALWAYS_FATAL_IF(it == mThreads.end(), "Could not find thread.");
+            it->second.detach();
+            mThreads.erase(it);
+        }
+    });
+    mThreads[thread.get_id()] = std::move(thread);
+}
+
+void RpcSession::join(unique_fd client) {
+    // must be registered to allow arbitrary client code executing commands to
+    // be able to do nested calls (we can't only read from it)
+    sp<RpcConnection> connection = assignServerToThisThread(std::move(client));
+
+    while (true) {
+        status_t error =
+                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));
+
+        if (error != OK) {
+            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
+            break;
+        }
+    }
+
+    LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
+                        "bad state: connection object guaranteed to be in list");
+}
+
+wp<RpcServer> RpcSession::server() {
+    return mForServer;
+}
+
+bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
+    {
+        std::lock_guard<std::mutex> _l(mMutex);
+        LOG_ALWAYS_FATAL_IF(mClients.size() != 0,
+                            "Must only setup session once, but already has %zu clients",
+                            mClients.size());
+    }
+
+    if (!setupOneSocketClient(addr, RPC_SESSION_ID_NEW)) return false;
+
+    // TODO(b/185167543): we should add additional connections dynamically
+    // instead of all at once.
+    // TODO(b/186470974): first risk of blocking
+    size_t numThreadsAvailable;
+    if (status_t status = getMaxThreads(&numThreadsAvailable); status != OK) {
+        ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
+              statusToString(status).c_str());
+        return false;
+    }
+
+    if (status_t status = readId(); status != OK) {
+        ALOGE("Could not get session id after initial session to %s; %s", addr.toString().c_str(),
+              statusToString(status).c_str());
+        return false;
+    }
+
+    // we've already set up one client
+    for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
+        // TODO(b/185167543): avoid race w/ accept4 not being called on server
+        for (size_t tries = 0; tries < 5; tries++) {
+            if (setupOneSocketClient(addr, mId.value())) break;
+            usleep(10000);
+        }
+    }
+
+    return true;
+}
+
+bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
+    unique_fd serverFd(
+            TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
+    if (serverFd == -1) {
+        int savedErrno = errno;
+        ALOGE("Could not create socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
+        return false;
+    }
+
+    if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
+        int savedErrno = errno;
+        ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
+        return false;
+    }
+
+    if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
+        int savedErrno = errno;
+        ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
+              strerror(savedErrno));
+        return false;
+    }
+
+    LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());
+
+    addClient(std::move(serverFd));
+    return true;
+}
+
+void RpcSession::addClient(unique_fd fd) {
+    std::lock_guard<std::mutex> _l(mMutex);
+    sp<RpcConnection> connection = sp<RpcConnection>::make();
+    connection->fd = std::move(fd);
+    mClients.push_back(connection);
+}
+
+void RpcSession::setForServer(const wp<RpcServer>& server, int32_t sessionId) {
+    mId = sessionId;
+    mForServer = server;
+}
+
+sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
+    std::lock_guard<std::mutex> _l(mMutex);
+    sp<RpcConnection> connection = sp<RpcConnection>::make();
+    connection->fd = std::move(fd);
+    connection->exclusiveTid = gettid();
+    mServers.push_back(connection);
+
+    return connection;
+}
+
+bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
+    std::lock_guard<std::mutex> _l(mMutex);
+    if (auto it = std::find(mServers.begin(), mServers.end(), connection); it != mServers.end()) {
+        mServers.erase(it);
+        return true;
+    }
+    return false;
+}
+
+RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
+                                                     ConnectionUse use)
+      : mSession(session) {
+    pid_t tid = gettid();
+    std::unique_lock<std::mutex> _l(mSession->mMutex);
+
+    mSession->mWaitingThreads++;
+    while (true) {
+        sp<RpcConnection> exclusive;
+        sp<RpcConnection> available;
+
+        // CHECK FOR DEDICATED CLIENT CONNECTION
+        //
+        // A server/looper should always use a dedicated connection if available
+        findConnection(tid, &exclusive, &available, mSession->mClients, mSession->mClientsOffset);
+
+        // WARNING: this assumes a server cannot request its client to send
+        // a transaction, as mServers is excluded below.
+        //
+        // Imagine we have more than one thread in play, and a single thread
+        // sends a synchronous, then an asynchronous command. Imagine the
+        // asynchronous command is sent on the first client connection. Then, if
+        // we naively send a synchronous command to that same connection, the
+        // thread on the far side might be busy processing the asynchronous
+        // command. So, we move to considering the second available thread
+        // for subsequent calls.
+        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
+            mSession->mClientsOffset = (mSession->mClientsOffset + 1) % mSession->mClients.size();
+        }
+
+        // USE SERVING CONNECTION (for nested transaction)
+        //
+        // asynchronous calls cannot be nested
+        if (use != ConnectionUse::CLIENT_ASYNC) {
+            // server connections are always assigned to a thread
+            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServers,
+                           0 /* index hint */);
+        }
+
+        // if our thread is already using a connection, prioritize using that
+        if (exclusive != nullptr) {
+            mConnection = exclusive;
+            mReentrant = true;
+            break;
+        } else if (available != nullptr) {
+            mConnection = available;
+            mConnection->exclusiveTid = tid;
+            break;
+        }
+
+        // in regular binder, this would usually be a deadlock :)
+        LOG_ALWAYS_FATAL_IF(mSession->mClients.size() == 0,
+                            "Not a client of any session. You must create a session to an "
+                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
+                            "calls.");
+
+        LOG_RPC_DETAIL("No available session (have %zu clients and %zu servers). Waiting...",
+                       mSession->mClients.size(), mSession->mServers.size());
+        mSession->mAvailableConnectionCv.wait(_l);
+    }
+    mSession->mWaitingThreads--;
+}
+
+void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
+                                                     sp<RpcConnection>* available,
+                                                     std::vector<sp<RpcConnection>>& sockets,
+                                                     size_t socketsIndexHint) {
+    LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
+                        "Bad index %zu >= %zu", socketsIndexHint, sockets.size());
+
+    if (*exclusive != nullptr) return; // consistent with break below
+
+    for (size_t i = 0; i < sockets.size(); i++) {
+        sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];
+
+        // take first available connection (intuition = caching)
+        if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
+            *available = socket;
+            continue;
+        }
+
+        // though, prefer to take a connection which is already in use by this
+        // thread (nested transactions)
+        if (exclusive && socket->exclusiveTid == tid) {
+            *exclusive = socket;
+            break; // consistent with return above
+        }
+    }
+}
+
+RpcSession::ExclusiveConnection::~ExclusiveConnection() {
+    // reentrant use of a connection means something less deep in the call stack
+    // is using this fd, and it retains the right to it. So, we don't give up
+    // exclusive ownership, and no thread is freed.
+    if (!mReentrant) {
+        std::unique_lock<std::mutex> _l(mSession->mMutex);
+        mConnection->exclusiveTid = std::nullopt;
+        if (mSession->mWaitingThreads > 0) {
+            _l.unlock();
+            mSession->mAvailableConnectionCv.notify_one();
+        }
+    }
+}
+
+} // namespace android
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 19dea7e..96190dc 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -31,16 +31,16 @@
 RpcState::RpcState() {}
 RpcState::~RpcState() {}
 
-status_t RpcState::onBinderLeaving(const sp<RpcConnection>& connection, const sp<IBinder>& binder,
+status_t RpcState::onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                    RpcAddress* outAddress) {
     bool isRemote = binder->remoteBinder();
     bool isRpc = isRemote && binder->remoteBinder()->isRpcBinder();
 
-    if (isRpc && binder->remoteBinder()->getPrivateAccessorForId().rpcConnection() != connection) {
+    if (isRpc && binder->remoteBinder()->getPrivateAccessorForId().rpcSession() != session) {
         // We need to be able to send instructions over the socket for how to
         // connect to a different server, and we also need to let the host
         // process know that this is happening.
-        ALOGE("Cannot send binder from unrelated binder RPC connection.");
+        ALOGE("Cannot send binder from unrelated binder RPC session.");
         return INVALID_OPERATION;
     }
 
@@ -91,8 +91,7 @@
     return OK;
 }
 
-sp<IBinder> RpcState::onBinderEntering(const sp<RpcConnection>& connection,
-                                       const RpcAddress& address) {
+sp<IBinder> RpcState::onBinderEntering(const sp<RpcSession>& session, const RpcAddress& address) {
     std::unique_lock<std::mutex> _l(mNodeMutex);
 
     if (auto it = mNodeForAddress.find(address); it != mNodeForAddress.end()) {
@@ -106,7 +105,7 @@
         // We have timesRecd RPC refcounts, but we only need to hold on to one
         // when we keep the object. All additional dec strongs are sent
         // immediately, we wait to send the last one in BpBinder::onLastDecStrong.
-        (void)connection->sendDecStrong(address);
+        (void)session->sendDecStrong(address);
 
         return binder;
     }
@@ -114,9 +113,9 @@
     auto&& [it, inserted] = mNodeForAddress.insert({address, BinderNode{}});
     LOG_ALWAYS_FATAL_IF(!inserted, "Failed to insert binder when creating proxy");
 
-    // Currently, all binders are assumed to be part of the same connection (no
+    // Currently, all binders are assumed to be part of the same session (no
     // device global binders in the RPC world).
-    sp<IBinder> binder = BpBinder::create(connection, it->first);
+    sp<IBinder> binder = BpBinder::create(session, it->first);
     it->second.binder = binder;
     it->second.timesRecd = 1;
     return binder;
@@ -232,14 +231,13 @@
     return true;
 }
 
-sp<IBinder> RpcState::getRootObject(const base::unique_fd& fd,
-                                    const sp<RpcConnection>& connection) {
+sp<IBinder> RpcState::getRootObject(const base::unique_fd& fd, const sp<RpcSession>& session) {
     Parcel data;
-    data.markForRpc(connection);
+    data.markForRpc(session);
     Parcel reply;
 
-    status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_ROOT, data,
-                               connection, &reply, 0);
+    status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_ROOT, data, session,
+                               &reply, 0);
     if (status != OK) {
         ALOGE("Error getting root object: %s", statusToString(status).c_str());
         return nullptr;
@@ -248,14 +246,14 @@
     return reply.readStrongBinder();
 }
 
-status_t RpcState::getMaxThreads(const base::unique_fd& fd, const sp<RpcConnection>& connection,
+status_t RpcState::getMaxThreads(const base::unique_fd& fd, const sp<RpcSession>& session,
                                  size_t* maxThreadsOut) {
     Parcel data;
-    data.markForRpc(connection);
+    data.markForRpc(session);
     Parcel reply;
 
     status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_MAX_THREADS, data,
-                               connection, &reply, 0);
+                               session, &reply, 0);
     if (status != OK) {
         ALOGE("Error getting max threads: %s", statusToString(status).c_str());
         return status;
@@ -273,29 +271,29 @@
     return OK;
 }
 
-status_t RpcState::getConnectionId(const base::unique_fd& fd, const sp<RpcConnection>& connection,
-                                   int32_t* connectionIdOut) {
+status_t RpcState::getSessionId(const base::unique_fd& fd, const sp<RpcSession>& session,
+                                int32_t* sessionIdOut) {
     Parcel data;
-    data.markForRpc(connection);
+    data.markForRpc(session);
     Parcel reply;
 
-    status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_CONNECTION_ID, data,
-                               connection, &reply, 0);
+    status_t status = transact(fd, RpcAddress::zero(), RPC_SPECIAL_TRANSACT_GET_SESSION_ID, data,
+                               session, &reply, 0);
     if (status != OK) {
-        ALOGE("Error getting connection ID: %s", statusToString(status).c_str());
+        ALOGE("Error getting session ID: %s", statusToString(status).c_str());
         return status;
     }
 
-    int32_t connectionId;
-    status = reply.readInt32(&connectionId);
+    int32_t sessionId;
+    status = reply.readInt32(&sessionId);
     if (status != OK) return status;
 
-    *connectionIdOut = connectionId;
+    *sessionIdOut = sessionId;
     return OK;
 }
 
 status_t RpcState::transact(const base::unique_fd& fd, const RpcAddress& address, uint32_t code,
-                            const Parcel& data, const sp<RpcConnection>& connection, Parcel* reply,
+                            const Parcel& data, const sp<RpcSession>& session, Parcel* reply,
                             uint32_t flags) {
     uint64_t asyncNumber = 0;
 
@@ -355,7 +353,7 @@
 
     LOG_ALWAYS_FATAL_IF(reply == nullptr, "Reply parcel must be used for synchronous transaction.");
 
-    return waitForReply(fd, connection, reply);
+    return waitForReply(fd, session, reply);
 }
 
 static void cleanup_reply_data(Parcel* p, const uint8_t* data, size_t dataSize,
@@ -367,7 +365,7 @@
     LOG_ALWAYS_FATAL_IF(objectsCount, 0);
 }
 
-status_t RpcState::waitForReply(const base::unique_fd& fd, const sp<RpcConnection>& connection,
+status_t RpcState::waitForReply(const base::unique_fd& fd, const sp<RpcSession>& session,
                                 Parcel* reply) {
     RpcWireHeader command;
     while (true) {
@@ -377,7 +375,7 @@
 
         if (command.command == RPC_COMMAND_REPLY) break;
 
-        status_t status = processServerCommand(fd, connection, command);
+        status_t status = processServerCommand(fd, session, command);
         if (status != OK) return status;
     }
 
@@ -399,7 +397,7 @@
     reply->ipcSetDataReference(rpcReply->data, command.bodySize - offsetof(RpcWireReply, data),
                                nullptr, 0, cleanup_reply_data);
 
-    reply->markForRpc(connection);
+    reply->markForRpc(session);
 
     return OK;
 }
@@ -430,8 +428,7 @@
     return OK;
 }
 
-status_t RpcState::getAndExecuteCommand(const base::unique_fd& fd,
-                                        const sp<RpcConnection>& connection) {
+status_t RpcState::getAndExecuteCommand(const base::unique_fd& fd, const sp<RpcSession>& session) {
     LOG_RPC_DETAIL("getAndExecuteCommand on fd %d", fd.get());
 
     RpcWireHeader command;
@@ -439,15 +436,14 @@
         return DEAD_OBJECT;
     }
 
-    return processServerCommand(fd, connection, command);
+    return processServerCommand(fd, session, command);
 }
 
-status_t RpcState::processServerCommand(const base::unique_fd& fd,
-                                        const sp<RpcConnection>& connection,
+status_t RpcState::processServerCommand(const base::unique_fd& fd, const sp<RpcSession>& session,
                                         const RpcWireHeader& command) {
     switch (command.command) {
         case RPC_COMMAND_TRANSACT:
-            return processTransact(fd, connection, command);
+            return processTransact(fd, session, command);
         case RPC_COMMAND_DEC_STRONG:
             return processDecStrong(fd, command);
     }
@@ -456,12 +452,12 @@
     // RPC-binder-level wire protocol is not self synchronizing, we have no way
     // to understand where the current command ends and the next one begins. We
     // also can't consider it a fatal error because this would allow any client
-    // to kill us, so ending the connection for misbehaving client.
-    ALOGE("Unknown RPC command %d - terminating connection", command.command);
+    // to kill us, so we end the session for the misbehaving client.
+    ALOGE("Unknown RPC command %d - terminating session", command.command);
     terminate();
     return DEAD_OBJECT;
 }
-status_t RpcState::processTransact(const base::unique_fd& fd, const sp<RpcConnection>& connection,
+status_t RpcState::processTransact(const base::unique_fd& fd, const sp<RpcSession>& session,
                                    const RpcWireHeader& command) {
     LOG_ALWAYS_FATAL_IF(command.command != RPC_COMMAND_TRANSACT, "command: %d", command.command);
 
@@ -470,7 +466,7 @@
         return DEAD_OBJECT;
     }
 
-    return processTransactInternal(fd, connection, std::move(transactionData));
+    return processTransactInternal(fd, session, std::move(transactionData));
 }
 
 static void do_nothing_to_transact_data(Parcel* p, const uint8_t* data, size_t dataSize,
@@ -482,8 +478,7 @@
     (void)objectsCount;
 }
 
-status_t RpcState::processTransactInternal(const base::unique_fd& fd,
-                                           const sp<RpcConnection>& connection,
+status_t RpcState::processTransactInternal(const base::unique_fd& fd, const sp<RpcSession>& session,
                                            std::vector<uint8_t>&& transactionData) {
     if (transactionData.size() < sizeof(RpcWireTransaction)) {
         ALOGE("Expecting %zu but got %zu bytes for RpcWireTransaction. Terminating!",
@@ -515,7 +510,7 @@
                 // However, for local binders, it indicates a misbehaving client
                 // (any binder which is being transacted on should be holding a
                 // strong ref count), so in either case, terminating the
-                // connection.
+                // session.
                 ALOGE("While transacting, binder has been deleted at address %s. Terminating!",
                       addr.toString().c_str());
                 terminate();
@@ -545,7 +540,7 @@
     }
 
     Parcel reply;
-    reply.markForRpc(connection);
+    reply.markForRpc(session);
 
     if (replyStatus == OK) {
         Parcel data;
@@ -556,14 +551,14 @@
                                  transactionData.size() - offsetof(RpcWireTransaction, data),
                                  nullptr /*object*/, 0 /*objectCount*/,
                                  do_nothing_to_transact_data);
-        data.markForRpc(connection);
+        data.markForRpc(session);
 
         if (target) {
             replyStatus = target->transact(transaction->code, data, &reply, transaction->flags);
         } else {
             LOG_RPC_DETAIL("Got special transaction %u", transaction->code);
 
-            sp<RpcServer> server = connection->server().promote();
+            sp<RpcServer> server = session->server().promote();
             if (server) {
                 // special case for 'zero' address (special server commands)
                 switch (transaction->code) {
@@ -575,13 +570,13 @@
                         replyStatus = reply.writeInt32(server->getMaxThreads());
                         break;
                     }
-                    case RPC_SPECIAL_TRANSACT_GET_CONNECTION_ID: {
-                        // only connections w/ services can be the source of a
-                        // connection ID (so still guarded by non-null server)
+                    case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
+                        // only sessions w/ services can be the source of a
+                        // session ID (so still guarded by non-null server)
                         //
-                        // connections associated with servers must have an ID
+                        // sessions associated with servers must have an ID
                         // (hence abort)
-                        int32_t id = connection->getPrivateAccessorForId().get().value();
+                        int32_t id = session->getPrivateAccessorForId().get().value();
                         replyStatus = reply.writeInt32(id);
                         break;
                     }
@@ -639,7 +634,7 @@
                         const_cast<BinderNode::AsyncTodo&>(it->second.asyncTodo.top()).data);
                 it->second.asyncTodo.pop();
                 _l.unlock();
-                return processTransactInternal(fd, connection, std::move(data));
+                return processTransactInternal(fd, session, std::move(data));
             }
         }
         return OK;
@@ -728,7 +723,7 @@
     }
 
     _l.unlock();
-    tempHold = nullptr; // destructor may make binder calls on this connection
+    tempHold = nullptr; // destructor may make binder calls on this session
 
     return OK;
 }
diff --git a/libs/binder/RpcState.h b/libs/binder/RpcState.h
index 825fd7c..3f3eb1c 100644
--- a/libs/binder/RpcState.h
+++ b/libs/binder/RpcState.h
@@ -18,7 +18,7 @@
 #include <android-base/unique_fd.h>
 #include <binder/IBinder.h>
 #include <binder/Parcel.h>
-#include <binder/RpcConnection.h>
+#include <binder/RpcSession.h>
 
 #include <map>
 #include <queue>
@@ -43,7 +43,7 @@
 
 /**
  * Abstracts away management of ref counts and the wire format from
- * RpcConnection
+ * RpcSession
  */
 class RpcState {
 public:
@@ -51,52 +51,51 @@
     ~RpcState();
 
     // TODO(b/182940634): combine some special transactions into one "getServerInfo" call?
-    sp<IBinder> getRootObject(const base::unique_fd& fd, const sp<RpcConnection>& connection);
-    status_t getMaxThreads(const base::unique_fd& fd, const sp<RpcConnection>& connection,
+    sp<IBinder> getRootObject(const base::unique_fd& fd, const sp<RpcSession>& session);
+    status_t getMaxThreads(const base::unique_fd& fd, const sp<RpcSession>& session,
                            size_t* maxThreadsOut);
-    status_t getConnectionId(const base::unique_fd& fd, const sp<RpcConnection>& connection,
-                             int32_t* connectionIdOut);
+    status_t getSessionId(const base::unique_fd& fd, const sp<RpcSession>& session,
+                          int32_t* sessionIdOut);
 
     [[nodiscard]] status_t transact(const base::unique_fd& fd, const RpcAddress& address,
                                     uint32_t code, const Parcel& data,
-                                    const sp<RpcConnection>& connection, Parcel* reply,
-                                    uint32_t flags);
+                                    const sp<RpcSession>& session, Parcel* reply, uint32_t flags);
     [[nodiscard]] status_t sendDecStrong(const base::unique_fd& fd, const RpcAddress& address);
     [[nodiscard]] status_t getAndExecuteCommand(const base::unique_fd& fd,
-                                                const sp<RpcConnection>& connection);
+                                                const sp<RpcSession>& session);
 
     /**
      * Called by Parcel for outgoing binders. This implies one refcount of
      * ownership to the outgoing binder.
      */
-    [[nodiscard]] status_t onBinderLeaving(const sp<RpcConnection>& connection,
-                                           const sp<IBinder>& binder, RpcAddress* outAddress);
+    [[nodiscard]] status_t onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
+                                           RpcAddress* outAddress);
 
     /**
      * Called by Parcel for incoming binders. This either returns the refcount
      * to the process, if this process already has one, or it takes ownership of
      * that refcount
      */
-    sp<IBinder> onBinderEntering(const sp<RpcConnection>& connection, const RpcAddress& address);
+    sp<IBinder> onBinderEntering(const sp<RpcSession>& session, const RpcAddress& address);
 
     size_t countBinders();
     void dump();
 
 private:
     /**
-     * Called when reading or writing data to a connection fails to clean up
-     * data associated with the connection in order to cleanup binders.
+     * Called when reading or writing data to a session fails, to clean up
+     * data associated with the session in order to clean up binders.
      * Specifically, we have a strong dependency cycle, since BpBinder is
      * OBJECT_LIFETIME_WEAK (so that onAttemptIncStrong may return true).
      *
-     *     BpBinder -> RpcConnection -> RpcState
+     *     BpBinder -> RpcSession -> RpcState
      *      ^-----------------------------/
      *
      * In the success case, eventually all refcounts should be propagated over
-     * the connection, though this could also be called to eagerly cleanup
-     * the connection.
+     * the session, though this could also be called to eagerly clean up
+     * the session.
      *
-     * WARNING: RpcState is responsible for calling this when the connection is
+     * WARNING: RpcState is responsible for calling this when the session is
      * no longer recoverable.
      */
     void terminate();
@@ -105,16 +104,15 @@
                                size_t size);
     [[nodiscard]] bool rpcRec(const base::unique_fd& fd, const char* what, void* data, size_t size);
 
-    [[nodiscard]] status_t waitForReply(const base::unique_fd& fd,
-                                        const sp<RpcConnection>& connection, Parcel* reply);
+    [[nodiscard]] status_t waitForReply(const base::unique_fd& fd, const sp<RpcSession>& session,
+                                        Parcel* reply);
     [[nodiscard]] status_t processServerCommand(const base::unique_fd& fd,
-                                                const sp<RpcConnection>& connection,
+                                                const sp<RpcSession>& session,
                                                 const RpcWireHeader& command);
-    [[nodiscard]] status_t processTransact(const base::unique_fd& fd,
-                                           const sp<RpcConnection>& connection,
+    [[nodiscard]] status_t processTransact(const base::unique_fd& fd, const sp<RpcSession>& session,
                                            const RpcWireHeader& command);
     [[nodiscard]] status_t processTransactInternal(const base::unique_fd& fd,
-                                                   const sp<RpcConnection>& connection,
+                                                   const sp<RpcSession>& session,
                                                    std::vector<uint8_t>&& transactionData);
     [[nodiscard]] status_t processDecStrong(const base::unique_fd& fd,
                                             const RpcWireHeader& command);
@@ -168,7 +166,7 @@
 
     std::mutex mNodeMutex;
     bool mTerminated = false;
-    // binders known by both sides of a connection
+    // binders known by both sides of a session
     std::map<RpcAddress, BinderNode> mNodeForAddress;
 };
 
diff --git a/libs/binder/RpcWireFormat.h b/libs/binder/RpcWireFormat.h
index a7e8a52..c5fa008 100644
--- a/libs/binder/RpcWireFormat.h
+++ b/libs/binder/RpcWireFormat.h
@@ -48,10 +48,10 @@
 enum : uint32_t {
     RPC_SPECIAL_TRANSACT_GET_ROOT = 0,
     RPC_SPECIAL_TRANSACT_GET_MAX_THREADS = 1,
-    RPC_SPECIAL_TRANSACT_GET_CONNECTION_ID = 2,
+    RPC_SPECIAL_TRANSACT_GET_SESSION_ID = 2,
 };
 
-constexpr int32_t RPC_CONNECTION_ID_NEW = -1;
+constexpr int32_t RPC_SESSION_ID_NEW = -1;
 
 // serialization is like:
 // |RpcWireHeader|struct designated by 'command'| (over and over again)
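[Editor's note: a hedged sketch of the receive loop this framing implies, built on the rpcRec() helper declared in the RpcState.h hunk above; the 'command' and 'bodySize' field names are assumptions, since RpcWireHeader's layout falls outside this hunk.]

    // Sketch only: read |RpcWireHeader|body| pairs "over and over again".
    RpcWireHeader command;
    if (!rpcRec(fd, "command header", &command, sizeof(command))) return DEAD_OBJECT;
    std::vector<uint8_t> body(command.bodySize); // 'bodySize' is assumed
    if (!rpcRec(fd, "command body", body.data(), body.size())) return DEAD_OBJECT;
    // ... dispatch on the command, e.g. a transaction carrying one of the
    // RPC_SPECIAL_TRANSACT_* codes above ...

[RPC_SESSION_ID_NEW pairs with the GET_SESSION_ID special transaction: a session's first connection presumably requests a new ID, and additional connections present the ID they already hold; compare readId() and setupOneSocketClient(address, sessionId) in RpcSession.h below, and mSessionIdCounter in RpcServer.h.]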
diff --git a/libs/binder/include/binder/BpBinder.h b/libs/binder/include/binder/BpBinder.h
index ad618f9..61bf018 100644
--- a/libs/binder/include/binder/BpBinder.h
+++ b/libs/binder/include/binder/BpBinder.h
@@ -28,7 +28,7 @@
 // ---------------------------------------------------------------------------
 namespace android {
 
-class RpcConnection;
+class RpcSession;
 class RpcState;
 namespace internal {
 class Stability;
@@ -41,11 +41,11 @@
 {
 public:
     static sp<BpBinder> create(int32_t handle);
-    static sp<BpBinder> create(const sp<RpcConnection>& connection, const RpcAddress& address);
+    static sp<BpBinder> create(const sp<RpcSession>& session, const RpcAddress& address);
 
     /**
      * Return value:
-     * true - this is associated with a socket RpcConnection
+     * true - this is associated with an RpcSession (an RPC binder over sockets)
      * false - (usual) binder over e.g. /dev/binder
      */
     bool isRpcBinder() const;
@@ -133,7 +133,7 @@
 
         // valid if isRpcBinder
         const RpcAddress& rpcAddress() const { return mBinder->rpcAddress(); }
-        const sp<RpcConnection>& rpcConnection() const { return mBinder->rpcConnection(); }
+        const sp<RpcSession>& rpcSession() const { return mBinder->rpcSession(); }
 
         const BpBinder* mBinder;
     };
@@ -148,19 +148,19 @@
     struct BinderHandle {
         int32_t handle;
     };
-    struct SocketHandle {
-        sp<RpcConnection> connection;
+    struct RpcHandle {
+        sp<RpcSession> session;
         RpcAddress address;
     };
-    using Handle = std::variant<BinderHandle, SocketHandle>;
+    using Handle = std::variant<BinderHandle, RpcHandle>;
 
     int32_t binderHandle() const;
     const RpcAddress& rpcAddress() const;
-    const sp<RpcConnection>& rpcConnection() const;
+    const sp<RpcSession>& rpcSession() const;
 
     explicit BpBinder(Handle&& handle);
     BpBinder(BinderHandle&& handle, int32_t trackedUid);
-    explicit BpBinder(SocketHandle&& handle);
+    explicit BpBinder(RpcHandle&& handle);
 
     virtual             ~BpBinder();
     virtual void        onFirstRef();
diff --git a/libs/binder/include/binder/Parcel.h b/libs/binder/include/binder/Parcel.h
index 9578372..5aaaa0c 100644
--- a/libs/binder/include/binder/Parcel.h
+++ b/libs/binder/include/binder/Parcel.h
@@ -50,7 +50,7 @@
 class IBinder;
 class IPCThreadState;
 class ProcessState;
-class RpcConnection;
+class RpcSession;
 class String8;
 class TextOutput;
 
@@ -103,7 +103,7 @@
 
     // Whenever possible, markForBinder should be preferred. This method is
     // called automatically on reply Parcels for RPC transactions.
-    void markForRpc(const sp<RpcConnection>& connection);
+    void markForRpc(const sp<RpcSession>& session);
 
     // Whether this Parcel is written for RPC transactions (after calls to
     // markForBinder or markForRpc).
@@ -1136,7 +1136,7 @@
 
     release_func        mOwner;
 
-    sp<RpcConnection> mConnection;
+    sp<RpcSession> mSession;
 
     class Blob {
     public:
diff --git a/libs/binder/include/binder/RpcServer.h b/libs/binder/include/binder/RpcServer.h
index 81ea3a7..c98151d 100644
--- a/libs/binder/include/binder/RpcServer.h
+++ b/libs/binder/include/binder/RpcServer.h
@@ -17,7 +17,7 @@
 
 #include <android-base/unique_fd.h>
 #include <binder/IBinder.h>
-#include <binder/RpcConnection.h>
+#include <binder/RpcSession.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
@@ -48,12 +48,12 @@
     static sp<RpcServer> make();
 
     /**
-     * This represents a connection for responses, e.g.:
+     * This represents a session for responses, e.g.:
      *
      *     process A serves binder a
-     *     process B opens a connection to process A
+     *     process B opens a session to process A
      *     process B makes binder b and sends it to A
-     *     A uses this 'back connection' to send things back to B
+     *     A uses this 'back session' to send things back to B
      */
     [[nodiscard]] bool setupUnixDomainServer(const char* path);
 
@@ -78,7 +78,7 @@
     void iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
 
     /**
-     * This must be called before adding a client connection.
+     * This must be called before adding a client session.
      *
      * If this is not specified, this will be a single-threaded server.
      *
@@ -96,7 +96,7 @@
     sp<IBinder> getRootObject();
 
     /**
-     * You must have at least one client connection before calling this.
+     * You must have at least one client session before calling this.
      *
      * TODO(b/185167543): way to shut down?
      */
@@ -105,7 +105,7 @@
     /**
      * For debugging!
      */
-    std::vector<sp<RpcConnection>> listConnections();
+    std::vector<sp<RpcSession>> listSessions();
 
     ~RpcServer();
 
@@ -118,12 +118,12 @@
     bool mAgreedExperimental = false;
     bool mStarted = false; // TODO(b/185167543): support dynamically added clients
     size_t mMaxThreads = 1;
-    base::unique_fd mServer; // socket we are accepting connections on
+    base::unique_fd mServer; // socket we are accepting session connections on
 
     std::mutex mLock; // for below
     sp<IBinder> mRootObject;
-    std::map<int32_t, sp<RpcConnection>> mConnections;
-    int32_t mConnectionIdCounter = 0;
+    std::map<int32_t, sp<RpcSession>> mSessions;
+    int32_t mSessionIdCounter = 0;
 };
 
 } // namespace android
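[Editor's note: pieced together from the declarations above and the test code later in this change, a hedged server-side usage sketch looks roughly like this; setMaxThreads() and join() are assumed names for the two declarations the hunks above elide.]

    // Sketch only: serve a root object over a unix domain socket.
    sp<RpcServer> server = RpcServer::make();
    server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
    server->setMaxThreads(3); // assumed name; must precede client sessions
    CHECK(server->setupUnixDomainServer("/some/path")); // illustrative path
    server->setRootObject(service); // some sp<IBinder> this process hosts
    server->join(); // assumed name; requires at least one client session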
diff --git a/libs/binder/include/binder/RpcConnection.h b/libs/binder/include/binder/RpcSession.h
similarity index 66%
rename from libs/binder/include/binder/RpcConnection.h
rename to libs/binder/include/binder/RpcSession.h
index 87984d7..3f58b2c 100644
--- a/libs/binder/include/binder/RpcConnection.h
+++ b/libs/binder/include/binder/RpcSession.h
@@ -38,12 +38,13 @@
 class RpcState;
 
 /**
- * This represents a multi-threaded/multi-socket connection between a client
- * and a server.
+ * This represents a session (a group of connections) between a client
+ * and a server. Multiple connections are needed to support parallel "binder"
+ * calls, which may themselves make nested calls.
  */
-class RpcConnection final : public virtual RefBase {
+class RpcSession final : public virtual RefBase {
 public:
-    static sp<RpcConnection> make();
+    static sp<RpcSession> make();
 
     /**
      * This should be called once per thread, matching 'join' in the remote
@@ -66,20 +67,20 @@
     /**
      * For debugging!
      *
-     * Sets up an empty socket. All queries to this socket which require a
+     * Sets up an empty connection. All queries to this connection which require a
      * response will never be satisfied. All data sent here will be
      * unceremoniously cast down the bottomless pit, /dev/null.
      */
     [[nodiscard]] bool addNullDebuggingClient();
 
     /**
-     * Query the other side of the connection for the root object hosted by that
+     * Query the other side of the session for the root object hosted by that
      * process's RpcServer (if one exists)
      */
     sp<IBinder> getRootObject();
 
     /**
-     * Query the other side of the connection for the maximum number of threads
+     * Query the other side of the session for the maximum number of threads
      * it supports (maximum number of concurrent non-nested synchronous transactions)
      */
     status_t getMaxThreads(size_t* maxThreads);
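[Editor's note: for the client side, a hedged sketch of the session APIs above, with setupUnixDomainClient() as exercised by binderRpcBenchmark.cpp later in this change.]

    // Sketch only: open a session and query the remote process.
    sp<RpcSession> session = RpcSession::make();
    if (!session->setupUnixDomainClient("/some/path")) { // illustrative path
        // could not connect; the tests below retry this in a loop
    }
    sp<IBinder> root = session->getRootObject();
    size_t maxThreads;
    status_t status = session->getMaxThreads(&maxThreads);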
@@ -88,7 +89,7 @@
                                     Parcel* reply, uint32_t flags);
     [[nodiscard]] status_t sendDecStrong(const RpcAddress& address);
 
-    ~RpcConnection();
+    ~RpcSession();
 
     wp<RpcServer> server();
 
@@ -97,28 +98,28 @@
 
     class PrivateAccessorForId {
     private:
-        friend class RpcConnection;
+        friend class RpcSession;
         friend class RpcState;
-        explicit PrivateAccessorForId(const RpcConnection* connection) : mConnection(connection) {}
+        explicit PrivateAccessorForId(const RpcSession* session) : mSession(session) {}
 
-        const std::optional<int32_t> get() { return mConnection->mId; }
+        const std::optional<int32_t> get() { return mSession->mId; }
 
-        const RpcConnection* mConnection;
+        const RpcSession* mSession;
     };
     PrivateAccessorForId getPrivateAccessorForId() const { return PrivateAccessorForId(this); }
 
 private:
     friend PrivateAccessorForId;
-    friend sp<RpcConnection>;
+    friend sp<RpcSession>;
     friend RpcServer;
-    RpcConnection();
+    RpcSession();
 
     status_t readId();
 
     void startThread(base::unique_fd client);
     void join(base::unique_fd client);
 
-    struct ConnectionSocket : public RefBase {
+    struct RpcConnection : public RefBase {
         base::unique_fd fd;
 
         // whether this or another thread is currently using this fd to make
@@ -127,32 +128,33 @@
     };
 
     bool setupSocketClient(const RpcSocketAddress& address);
-    bool setupOneSocketClient(const RpcSocketAddress& address, int32_t connectionId);
+    bool setupOneSocketClient(const RpcSocketAddress& address, int32_t sessionId);
     void addClient(base::unique_fd fd);
-    void setForServer(const wp<RpcServer>& server, int32_t connectionId);
-    sp<ConnectionSocket> assignServerToThisThread(base::unique_fd fd);
-    bool removeServerSocket(const sp<ConnectionSocket>& socket);
+    void setForServer(const wp<RpcServer>& server, int32_t sessionId);
+    sp<RpcConnection> assignServerToThisThread(base::unique_fd fd);
+    bool removeServerConnection(const sp<RpcConnection>& connection);
 
-    enum class SocketUse {
+    enum class ConnectionUse {
         CLIENT,
         CLIENT_ASYNC,
         CLIENT_REFCOUNT,
     };
 
-    // RAII object for connection socket
-    class ExclusiveSocket {
+    // RAII object for session connection
+    class ExclusiveConnection {
     public:
-        explicit ExclusiveSocket(const sp<RpcConnection>& connection, SocketUse use);
-        ~ExclusiveSocket();
-        const base::unique_fd& fd() { return mSocket->fd; }
+        explicit ExclusiveConnection(const sp<RpcSession>& session, ConnectionUse use);
+        ~ExclusiveConnection();
+        const base::unique_fd& fd() { return mConnection->fd; }
 
     private:
-        static void findSocket(pid_t tid, sp<ConnectionSocket>* exclusive,
-                               sp<ConnectionSocket>* available,
-                               std::vector<sp<ConnectionSocket>>& sockets, size_t socketsIndexHint);
+        static void findConnection(pid_t tid, sp<RpcConnection>* exclusive,
+                                   sp<RpcConnection>* available,
+                                   std::vector<sp<RpcConnection>>& connections,
+                                   size_t connectionsIndexHint);
 
-        sp<RpcConnection> mConnection; // avoid deallocation
-        sp<ConnectionSocket> mSocket;
+        sp<RpcSession> mSession; // avoid deallocation
+        sp<RpcConnection> mConnection;
 
         // whether this is being used for a nested transaction (being on the same
         // thread guarantees we won't write in the middle of a message, the way
@@ -160,10 +162,10 @@
         bool mReentrant = false;
     };
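[Editor's note: a hedged sketch of the RAII pattern this class enables on a session's transact path; illustrative only, the real call sites are in RpcSession.cpp, outside this section.]

    {
        // Picks, or waits for, a connection this thread may use; the
        // destructor presumably returns it to the session's pool.
        ExclusiveConnection connection(session, ConnectionUse::CLIENT);
        const base::unique_fd& fd = connection.fd();
        // ... write the transaction and wait for the reply on 'fd' ...
    } // connection released here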
 
-    // On the other side of a connection, for each of mClients here, there should
+    // For each of mClients here, there should
     // be one of mServers on the other side (and vice versa).
     //
-    // For the simplest connection, a single server with one client, you would
+    // For the simplest session, a single server with one client, you would
     // have:
     //  - the server has a single 'mServers' and a thread listening on this
     //  - the client has a single 'mClients' and makes calls to this
@@ -174,24 +176,24 @@
     // For a more complicated case, the client might itself open up a thread to
     // serve calls to the server at all times (e.g. if it hosts a callback)
 
-    wp<RpcServer> mForServer; // maybe null, for client connections
+    wp<RpcServer> mForServer; // maybe null, for client sessions
 
     // TODO(b/183988761): this shouldn't be guessable
     std::optional<int32_t> mId;
 
     std::unique_ptr<RpcState> mState;
 
-    std::mutex mSocketMutex;           // for all below
+    std::mutex mMutex; // for all below
 
-    std::condition_variable mSocketCv; // for mWaitingThreads
+    std::condition_variable mAvailableConnectionCv; // for mWaitingThreads
     size_t mWaitingThreads = 0;
     size_t mClientsOffset = 0; // hint index into clients, ++ when sending an async transaction
-    std::vector<sp<ConnectionSocket>> mClients;
-    std::vector<sp<ConnectionSocket>> mServers;
+    std::vector<sp<RpcConnection>> mClients;
+    std::vector<sp<RpcConnection>> mServers;
 
-    // TODO(b/185167543): use for reverse connections (allow client to also
-    // serve calls on a connection).
-    // TODO(b/185167543): allow sharing between different connections in a
+    // TODO(b/185167543): use for reverse sessions (allow client to also
+    // serve calls on a session).
+    // TODO(b/185167543): allow sharing between different sessions in a
     // process? (or combine with mServers)
     std::map<std::thread::id, std::thread> mThreads;
 };
diff --git a/libs/binder/parcel_fuzzer/main.cpp b/libs/binder/parcel_fuzzer/main.cpp
index 332e2ad..a47b753 100644
--- a/libs/binder/parcel_fuzzer/main.cpp
+++ b/libs/binder/parcel_fuzzer/main.cpp
@@ -23,7 +23,7 @@
 #include <iostream>
 
 #include <android-base/logging.h>
-#include <binder/RpcConnection.h>
+#include <binder/RpcSession.h>
 #include <fuzzbinder/random_parcel.h>
 #include <fuzzer/FuzzedDataProvider.h>
 
@@ -33,7 +33,7 @@
 #include <sys/time.h>
 
 using android::fillRandomParcel;
-using android::RpcConnection;
+using android::RpcSession;
 using android::sp;
 
 void fillRandomParcel(::android::hardware::Parcel* p, FuzzedDataProvider&& provider) {
@@ -61,9 +61,9 @@
     P p;
     if constexpr (std::is_same_v<P, android::Parcel>) {
         if (provider.ConsumeBool()) {
-            auto connection = sp<RpcConnection>::make();
-            CHECK(connection->addNullDebuggingClient());
-            p.markForRpc(connection);
+            auto session = sp<RpcSession>::make();
+            CHECK(session->addNullDebuggingClient());
+            p.markForRpc(session);
             fillRandomParcelData(&p, std::move(provider));
         } else {
             fillRandomParcel(&p, std::move(provider));
diff --git a/libs/binder/tests/IBinderRpcTest.aidl b/libs/binder/tests/IBinderRpcTest.aidl
index 814e094..ef4198d 100644
--- a/libs/binder/tests/IBinderRpcTest.aidl
+++ b/libs/binder/tests/IBinderRpcTest.aidl
@@ -18,7 +18,7 @@
     oneway void sendString(@utf8InCpp String str);
     @utf8InCpp String doubleString(@utf8InCpp String str);
 
-    // number of known RPC binders to process, RpcState::countBinders by connection
+    // number of RPC binders known to the process, as counted by RpcState::countBinders, per session
     int[] countBinders();
 
     // Caller sends server, callee pings caller's server and returns error code.
@@ -36,7 +36,7 @@
     // should always return the same binder
     IBinder alwaysGiveMeTheSameBinder();
 
-    // Idea is that the server will not hold onto the session, the remote connection
+    // Idea is that the server will not hold onto the session; the remote session
     // object must. This is to test lifetimes of binder objects, and consequently, also
     // identity (since by assigning sessions names, we can make sure a session always
     // references the session it was originally opened with).
diff --git a/libs/binder/tests/binderRpcBenchmark.cpp b/libs/binder/tests/binderRpcBenchmark.cpp
index f64bc5b..a457e67 100644
--- a/libs/binder/tests/binderRpcBenchmark.cpp
+++ b/libs/binder/tests/binderRpcBenchmark.cpp
@@ -18,8 +18,8 @@
 #include <android-base/logging.h>
 #include <benchmark/benchmark.h>
 #include <binder/Binder.h>
-#include <binder/RpcConnection.h>
 #include <binder/RpcServer.h>
+#include <binder/RpcSession.h>
 
 #include <thread>
 
@@ -30,8 +30,8 @@
 using android::IBinder;
 using android::interface_cast;
 using android::OK;
-using android::RpcConnection;
 using android::RpcServer;
+using android::RpcSession;
 using android::sp;
 using android::binder::Status;
 
@@ -46,17 +46,17 @@
     }
 };
 
-static sp<RpcConnection> gConnection = RpcConnection::make();
+static sp<RpcSession> gSession = RpcSession::make();
 
 void BM_getRootObject(benchmark::State& state) {
     while (state.KeepRunning()) {
-        CHECK(gConnection->getRootObject() != nullptr);
+        CHECK(gSession->getRootObject() != nullptr);
     }
 }
 BENCHMARK(BM_getRootObject);
 
 void BM_pingTransaction(benchmark::State& state) {
-    sp<IBinder> binder = gConnection->getRootObject();
+    sp<IBinder> binder = gSession->getRootObject();
     CHECK(binder != nullptr);
 
     while (state.KeepRunning()) {
@@ -66,7 +66,7 @@
 BENCHMARK(BM_pingTransaction);
 
 void BM_repeatString(benchmark::State& state) {
-    sp<IBinder> binder = gConnection->getRootObject();
+    sp<IBinder> binder = gSession->getRootObject();
     CHECK(binder != nullptr);
     sp<IBinderRpcBenchmark> iface = interface_cast<IBinderRpcBenchmark>(binder);
     CHECK(iface != nullptr);
@@ -95,7 +95,7 @@
 BENCHMARK(BM_repeatString);
 
 void BM_repeatBinder(benchmark::State& state) {
-    sp<IBinder> binder = gConnection->getRootObject();
+    sp<IBinder> binder = gSession->getRootObject();
     CHECK(binder != nullptr);
     sp<IBinderRpcBenchmark> iface = interface_cast<IBinderRpcBenchmark>(binder);
     CHECK(iface != nullptr);
@@ -128,7 +128,7 @@
 
     for (size_t tries = 0; tries < 5; tries++) {
         usleep(10000);
-        if (gConnection->setupUnixDomainClient(addr.c_str())) goto success;
+        if (gSession->setupUnixDomainClient(addr.c_str())) goto success;
     }
     LOG(FATAL) << "Could not connect.";
 success:
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index 50bff91..8d10727 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -25,8 +25,8 @@
 #include <binder/BpBinder.h>
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
-#include <binder/RpcConnection.h>
 #include <binder/RpcServer.h>
+#include <binder/RpcSession.h>
 #include <gtest/gtest.h>
 
 #include <chrono>
@@ -94,12 +94,12 @@
             return Status::fromExceptionCode(Status::EX_NULL_POINTER);
         }
         out->clear();
-        for (auto connection : spServer->listConnections()) {
-            size_t count = connection->state()->countBinders();
+        for (auto session : spServer->listSessions()) {
+            size_t count = session->state()->countBinders();
             if (count != 1) {
                 // this is called when there is only one binder held remaining,
                 // so to aid debugging
-                connection->state()->dump();
+                session->state()->dump();
             }
             out->push_back(count);
         }
@@ -225,61 +225,60 @@
     return temp + "/binderRpcTest_" + std::to_string(id++);
 };
 
-struct ProcessConnection {
+struct ProcessSession {
     // reference to process hosting a socket server
     Process host;
 
-    struct ConnectionInfo {
-        sp<RpcConnection> connection;
+    struct SessionInfo {
+        sp<RpcSession> session;
         sp<IBinder> root;
     };
 
-    // client connection objects associated with other process
-    // each one represents a separate connection
-    std::vector<ConnectionInfo> connections;
+    // client session objects associated with the other process
+    // each one represents a separate session
+    std::vector<SessionInfo> sessions;
 
-    ProcessConnection(ProcessConnection&&) = default;
-    ~ProcessConnection() {
-        for (auto& connection : connections) {
-            connection.root = nullptr;
+    ProcessSession(ProcessSession&&) = default;
+    ~ProcessSession() {
+        for (auto& session : sessions) {
+            session.root = nullptr;
         }
 
-        for (auto& info : connections) {
-            sp<RpcConnection>& connection = info.connection;
+        for (auto& info : sessions) {
+            sp<RpcSession>& session = info.session;
 
-            EXPECT_NE(nullptr, connection);
-            EXPECT_NE(nullptr, connection->state());
-            EXPECT_EQ(0, connection->state()->countBinders())
-                    << (connection->state()->dump(), "dump:");
+            EXPECT_NE(nullptr, session);
+            EXPECT_NE(nullptr, session->state());
+            EXPECT_EQ(0, session->state()->countBinders()) << (session->state()->dump(), "dump:");
 
-            wp<RpcConnection> weakConnection = connection;
-            connection = nullptr;
-            EXPECT_EQ(nullptr, weakConnection.promote()) << "Leaked connection";
+            wp<RpcSession> weakSession = session;
+            session = nullptr;
+            EXPECT_EQ(nullptr, weakSession.promote()) << "Leaked session";
         }
     }
 };
 
-// Process connection where the process hosts IBinderRpcTest, the server used
+// Process session where the process hosts IBinderRpcTest, the server used
 // for most testing here
-struct BinderRpcTestProcessConnection {
-    ProcessConnection proc;
+struct BinderRpcTestProcessSession {
+    ProcessSession proc;
 
-    // pre-fetched root object (for first connection)
+    // pre-fetched root object (for first session)
     sp<IBinder> rootBinder;
 
-    // pre-casted root object (for first connection)
+    // pre-casted root object (for first session)
     sp<IBinderRpcTest> rootIface;
 
-    // whether connection should be invalidated by end of run
+    // whether session should be invalidated by end of run
     bool expectInvalid = false;
 
-    BinderRpcTestProcessConnection(BinderRpcTestProcessConnection&&) = default;
-    ~BinderRpcTestProcessConnection() {
+    BinderRpcTestProcessSession(BinderRpcTestProcessSession&&) = default;
+    ~BinderRpcTestProcessSession() {
         if (!expectInvalid) {
             std::vector<int32_t> remoteCounts;
-            // calling over any connections counts across all connections
+            // calling countBinders over any one session counts across all sessions
             EXPECT_OK(rootIface->countBinders(&remoteCounts));
-            EXPECT_EQ(remoteCounts.size(), proc.connections.size());
+            EXPECT_EQ(remoteCounts.size(), proc.sessions.size());
             for (auto remoteCount : remoteCounts) {
                 EXPECT_EQ(remoteCount, 1);
             }
@@ -316,10 +315,10 @@
 public:
     // This creates a new process serving an interface on a certain number of
     // threads.
-    ProcessConnection createRpcTestSocketServerProcess(
-            size_t numThreads, size_t numConnections,
+    ProcessSession createRpcTestSocketServerProcess(
+            size_t numThreads, size_t numSessions,
             const std::function<void(const sp<RpcServer>&)>& configure) {
-        CHECK_GE(numConnections, 1) << "Must have at least one connection to a server";
+        CHECK_GE(numSessions, 1) << "Must have at least one session to a server";
 
         SocketType socketType = GetParam();
 
@@ -328,7 +327,7 @@
         static unsigned int vsockPort = 3456;
         vsockPort++;
 
-        auto ret = ProcessConnection{
+        auto ret = ProcessSession{
                 .host = Process([&](Pipe* pipe) {
                     sp<RpcServer> server = RpcServer::make();
 
@@ -369,21 +368,21 @@
             CHECK_NE(0, inetPort);
         }
 
-        for (size_t i = 0; i < numConnections; i++) {
-            sp<RpcConnection> connection = RpcConnection::make();
+        for (size_t i = 0; i < numSessions; i++) {
+            sp<RpcSession> session = RpcSession::make();
             for (size_t tries = 0; tries < 10; tries++) {
                 usleep(10000);
                 switch (socketType) {
                     case SocketType::UNIX:
-                        if (connection->setupUnixDomainClient(addr.c_str())) goto success;
+                        if (session->setupUnixDomainClient(addr.c_str())) goto success;
                         break;
 #ifdef __BIONIC__
                     case SocketType::VSOCK:
-                        if (connection->setupVsockClient(VMADDR_CID_LOCAL, vsockPort)) goto success;
+                        if (session->setupVsockClient(VMADDR_CID_LOCAL, vsockPort)) goto success;
                         break;
 #endif // __BIONIC__
                     case SocketType::INET:
-                        if (connection->setupInetClient("127.0.0.1", inetPort)) goto success;
+                        if (session->setupInetClient("127.0.0.1", inetPort)) goto success;
                         break;
                     default:
                         LOG_ALWAYS_FATAL("Unknown socket type");
@@ -391,15 +390,15 @@
             }
             LOG_ALWAYS_FATAL("Could not connect");
         success:
-            ret.connections.push_back({connection, connection->getRootObject()});
+            ret.sessions.push_back({session, session->getRootObject()});
         }
         return ret;
     }
 
-    BinderRpcTestProcessConnection createRpcTestSocketServerProcess(size_t numThreads,
-                                                                    size_t numConnections = 1) {
-        BinderRpcTestProcessConnection ret{
-                .proc = createRpcTestSocketServerProcess(numThreads, numConnections,
+    BinderRpcTestProcessSession createRpcTestSocketServerProcess(size_t numThreads,
+                                                                 size_t numSessions = 1) {
+        BinderRpcTestProcessSession ret{
+                .proc = createRpcTestSocketServerProcess(numThreads, numSessions,
                                                          [&](const sp<RpcServer>& server) {
                                                              sp<MyBinderRpcTest> service =
                                                                      new MyBinderRpcTest;
@@ -408,7 +407,7 @@
                                                          }),
         };
 
-        ret.rootBinder = ret.proc.connections.at(0).root;
+        ret.rootBinder = ret.proc.sessions.at(0).root;
         ret.rootIface = interface_cast<IBinderRpcTest>(ret.rootBinder);
 
         return ret;
@@ -421,7 +420,7 @@
         server->setRootObject(nullptr);
     });
 
-    EXPECT_EQ(nullptr, proc.connections.at(0).root);
+    EXPECT_EQ(nullptr, proc.sessions.at(0).root);
 }
 
 TEST_P(BinderRpc, Ping) {
@@ -436,11 +435,11 @@
     EXPECT_EQ(IBinderRpcTest::descriptor, proc.rootBinder->getInterfaceDescriptor());
 }
 
-TEST_P(BinderRpc, MultipleConnections) {
-    auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 5 /*connections*/);
-    for (auto connection : proc.proc.connections) {
-        ASSERT_NE(nullptr, connection.root);
-        EXPECT_EQ(OK, connection.root->pingBinder());
+TEST_P(BinderRpc, MultipleSessions) {
+    auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 5 /*sessions*/);
+    for (auto session : proc.proc.sessions) {
+        ASSERT_NE(nullptr, session.root);
+        EXPECT_EQ(OK, session.root->pingBinder());
     }
 }
 
@@ -582,7 +581,7 @@
 // These are behavioral differences from regular binder, where certain use cases
 // aren't supported.
 
-TEST_P(BinderRpc, CannotMixBindersBetweenUnrelatedSocketConnections) {
+TEST_P(BinderRpc, CannotMixBindersBetweenUnrelatedSocketSessions) {
     auto proc1 = createRpcTestSocketServerProcess(1);
     auto proc2 = createRpcTestSocketServerProcess(1);
 
@@ -591,12 +590,12 @@
               proc1.rootIface->repeatBinder(proc2.rootBinder, &outBinder).transactionError());
 }
 
-TEST_P(BinderRpc, CannotMixBindersBetweenTwoConnectionsToTheSameServer) {
-    auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 2 /*connections*/);
+TEST_P(BinderRpc, CannotMixBindersBetweenTwoSessionsToTheSameServer) {
+    auto proc = createRpcTestSocketServerProcess(1 /*threads*/, 2 /*sessions*/);
 
     sp<IBinder> outBinder;
     EXPECT_EQ(INVALID_OPERATION,
-              proc.rootIface->repeatBinder(proc.proc.connections.at(1).root, &outBinder)
+              proc.rootIface->repeatBinder(proc.proc.sessions.at(1).root, &outBinder)
                       .transactionError());
 }