[rpc_binder] Implement RPC binder over init-managed Unix domain socket

This implements an RPC binder over init-managed Unix domain sockets.
The CL adds binder tests, and the new API is also used by
vm_payload_service inside Microdroid.
A previous CL, aosp/2229557, set up the binder over anonymous Unix
sockets.
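
As a rough usage sketch (the socket name and wrapper function below are
placeholders, not part of this change), a service whose .rc file declares
a socket can hand the init-created fd to the new API:

    #include <android-base/unique_fd.h>
    #include <binder/IBinder.h>
    #include <binder/RpcServer.h>
    #include <cutils/sockets.h>

    void startRpcServer(const android::sp<android::IBinder>& root) {
        // init publishes the socket fd via ANDROID_SOCKET_<name>;
        // android_get_control_socket() looks it up by name.
        android::base::unique_fd fd(android_get_control_socket("my_service"));
        auto server = android::RpcServer::make();
        server->setRootObject(root);
        // New in this CL: listen() on the already-bound socket.
        if (server->setupRawSocketServer(std::move(fd)) != android::OK) return;
        server->join();  // serve connections on the current thread
    }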

Test: atest MicrodroidTests ComposHostTestCases
Bug: 222479468
Change-Id: I0c7c38f4792c4536f5f88eb7e035091505f782f7
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 83d0de7..399667d 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -564,6 +564,27 @@
     return OK;
 }
 
+status_t RpcServer::setupRawSocketServer(base::unique_fd socket_fd) {
+    if (!socket_fd.ok()) {
+        ALOGE("Socket must be bound before being passed to setupRawSocketServer");
+        return BAD_VALUE;
+    }
+    // Right now, we create all threads at once, making accept4 slow. To avoid hanging the client,
+    // the backlog is increased to a large number.
+    // TODO(b/189955605): Once we create threads dynamically & lazily, the backlog can be reduced
+    //  to 1.
+    if (0 != TEMP_FAILURE_RETRY(listen(socket_fd.get(), 50 /*backlog*/))) {
+        int savedErrno = errno;
+        ALOGE("Could not listen on initialized Unix socket: %s", strerror(savedErrno));
+        return -savedErrno;
+    }
+    if (status_t status = setupExternalServer(std::move(socket_fd)); status != OK) {
+        ALOGE("Another thread has set up the server while calling setupRawSocketServer. Race?");
+        return status;
+    }
+    return OK;
+}
+
 void RpcServer::onSessionAllIncomingThreadsEnded(const sp<RpcSession>& session) {
     const std::vector<uint8_t>& id = session->mId;
     LOG_ALWAYS_FATAL_IF(id.empty(), "Server sessions must be initialized with ID");