libbinder: RPC sock addr associated with RpcServer

This is in preparation for RpcServer having a single thread which loops
on accept4. Since the server socket is now set up by and owned by
RpcServer, there is no longer a way to identify which thread goes with
which client, so support for multiple clients is temporarily limited to
one (this is fixed in the CL above this one). Nothing currently needs
more than one client anyway.
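
For reference, a minimal server setup with this API looks roughly like
the sketch below (includes omitted). setupUnixDomainServer(),
setMaxThreads(), and join() are from this CL; RpcServer::make(),
setRootObject(), and the experimental opt-in call are assumed from the
surrounding RpcServer API, and runServer(), rootObject, and the socket
path are placeholders:

  void runServer(const sp<IBinder>& rootObject) {
      sp<RpcServer> server = RpcServer::make();
      // opt-in required before use; join() aborts without it
      server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
      server->setMaxThreads(1);          // must be called before join()
      server->setRootObject(rootObject); // object handed out to clients
      // placeholder path; this creates, binds, and listens on the socket
      if (!server->setupUnixDomainServer("/dev/some_rpc_socket")) return;
      // blocks: each of the mMaxThreads threads accept4()s one connection
      // and services it (only a single client with this CL)
      server->join();
  }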

Bug: 185167543
Test: binderRpcTest
Change-Id: I48821970f7cbcb3fec0df00465296072d96db608
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 8f2805f..0753b54 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -27,10 +27,13 @@
 #include <log/log.h>
 #include "RpcState.h"
 
+#include "RpcSocketAddress.h"
 #include "RpcWireFormat.h"
 
 namespace android {
 
+using base::unique_fd;
+
 RpcServer::RpcServer() {}
 RpcServer::~RpcServer() {}
 
@@ -42,14 +45,63 @@
     mAgreedExperimental = true;
 }
 
+bool RpcServer::setupUnixDomainServer(const char* path) {
+    return setupSocketServer(UnixSocketAddress(path));
+}
+
+#ifdef __BIONIC__
+
+bool RpcServer::setupVsockServer(unsigned int port) {
+    // realize VMADDR_CID_ANY (-1U) with this type at compile time to avoid a ubsan abort
+    constexpr unsigned int kAnyCid = VMADDR_CID_ANY;
+
+    return setupSocketServer(VsockSocketAddress(kAnyCid, port));
+}
+
+#endif // __BIONIC__
+
+bool RpcServer::setupInetServer(unsigned int port, unsigned int* assignedPort) {
+    const char* kAddr = "127.0.0.1";
+
+    if (assignedPort != nullptr) *assignedPort = 0;
+    auto aiStart = InetSocketAddress::getAddrInfo(kAddr, port);
+    if (aiStart == nullptr) return false;
+    for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
+        InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, kAddr, port);
+        if (!setupSocketServer(socketAddress)) {
+            continue;
+        }
+
+        LOG_ALWAYS_FATAL_IF(socketAddress.addr()->sa_family != AF_INET, "expecting inet");
+        sockaddr_in addr{};
+        socklen_t len = sizeof(addr);
+        if (0 != getsockname(mServer.get(), reinterpret_cast<sockaddr*>(&addr), &len)) {
+            int savedErrno = errno;
+            ALOGE("Could not getsockname at %s: %s", socketAddress.toString().c_str(),
+                  strerror(savedErrno));
+            return false;
+        }
+        LOG_ALWAYS_FATAL_IF(len != sizeof(addr), "Wrong socket type: len %zu vs len %zu",
+                            static_cast<size_t>(len), sizeof(addr));
+        unsigned int realPort = ntohs(addr.sin_port);
+        LOG_ALWAYS_FATAL_IF(port != 0 && realPort != port,
+                            "Requesting inet server on %s but it is set up on %u.",
+                            socketAddress.toString().c_str(), realPort);
+
+        if (assignedPort != nullptr) {
+            *assignedPort = realPort;
+        }
+
+        return true;
+    }
+    ALOGE("None of the socket address resolved for %s:%u can be set up as inet server.", kAddr,
+          port);
+    return false;
+}
+
 void RpcServer::setMaxThreads(size_t threads) {
     LOG_ALWAYS_FATAL_IF(threads <= 0, "RpcServer is useless without threads");
-    {
-        // this lock should only ever be needed in the error case
-        std::lock_guard<std::mutex> _l(mLock);
-        LOG_ALWAYS_FATAL_IF(mConnections.size() > 0,
-                            "Must specify max threads before creating a connection");
-    }
+    LOG_ALWAYS_FATAL_IF(mStarted, "setMaxThreads must be called before the server is started");
     mMaxThreads = threads;
 }
 
@@ -67,30 +119,36 @@
     return mRootObject;
 }
 
-sp<RpcConnection> RpcServer::addClientConnection() {
+void RpcServer::join() {
     LOG_ALWAYS_FATAL_IF(!mAgreedExperimental, "no!");
 
-    auto connection = RpcConnection::make();
-    connection->setForServer(sp<RpcServer>::fromExisting(this));
-    {
-        std::lock_guard<std::mutex> _l(mLock);
-        LOG_ALWAYS_FATAL_IF(mStarted,
-                            "currently only supports adding client connections at creation time");
-        mConnections.push_back(connection);
-    }
-    return connection;
-}
-
-void RpcServer::join() {
     std::vector<std::thread> pool;
     {
         std::lock_guard<std::mutex> _l(mLock);
+        LOG_ALWAYS_FATAL_IF(mServer.get() == -1, "RpcServer must be set up before calling join.");
+        // TODO(b/185167543): support more than one client at once
+        mConnection = RpcConnection::make();
+        mConnection->setForServer(sp<RpcServer>::fromExisting(this));
+
         mStarted = true;
-        for (const sp<RpcConnection>& connection : mConnections) {
             for (size_t i = 0; i < mMaxThreads; i++) {
-                pool.push_back(std::thread([=] { connection->join(); }));
+                pool.push_back(std::thread([=] {
+                    // TODO(b/185167543): do this dynamically, instead of from a static number
+                    // of threads
+                    unique_fd clientFd(TEMP_FAILURE_RETRY(
+                            accept4(mServer.get(), nullptr, nullptr /*addrlen*/, SOCK_CLOEXEC)));
+                    if (clientFd < 0) {
+                        // If this log becomes confusing, setupSocketServer could save
+                        // more state so that it can be printed here.
+                        ALOGE("Could not accept4 on socket: %s", strerror(errno));
+                        return;
+                    }
+
+                    LOG_RPC_DETAIL("accept4 on fd %d yields fd %d", mServer.get(), clientFd.get());
+
+                    mConnection->join(std::move(clientFd));
+                }));
             }
-        }
     }
 
     // TODO(b/185167543): don't waste extra thread for join, and combine threads
@@ -98,4 +156,39 @@
     for (auto& t : pool) t.join();
 }
 
+std::vector<sp<RpcConnection>> RpcServer::listConnections() {
+    std::lock_guard<std::mutex> _l(mLock);
+    if (mConnection == nullptr) return {};
+    return {mConnection};
+}
+
+bool RpcServer::setupSocketServer(const RpcSocketAddress& addr) {
+    {
+        std::lock_guard<std::mutex> _l(mLock);
+        LOG_ALWAYS_FATAL_IF(mServer.get() != -1, "An RpcServer can only have one server socket.");
+    }
+
+    unique_fd serverFd(
+            TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
+    if (serverFd == -1) {
+        ALOGE("Could not create socket: %s", strerror(errno));
+        return false;
+    }
+
+    if (0 != TEMP_FAILURE_RETRY(bind(serverFd.get(), addr.addr(), addr.addrSize()))) {
+        int savedErrno = errno;
+        ALOGE("Could not bind socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
+        return false;
+    }
+
+    if (0 != TEMP_FAILURE_RETRY(listen(serverFd.get(), 1 /*backlog*/))) {
+        int savedErrno = errno;
+        ALOGE("Could not listen socket at %s: %s", addr.toString().c_str(), strerror(savedErrno));
+        return false;
+    }
+
+    mServer = std::move(serverFd);
+    return true;
+}
+
 } // namespace android