libbinder: finalize connect/server APIs

Before, you needed to manually set up the required number of sockets on
the client and server sides of a connection and manually set up threads.
Now, you configure the thread count on RpcServer and call join once, and
on the client side, you connect once, and the connection figures out how
many socket connections it needs to make.

Now, we will be able to manage how these sockets/threads get set up
without affecting any client code in the various tests.

So, a server looks like this:

    sp<RpcServer> server = RpcServer::make();
    // still until we are ready to open this up
    server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
    server->setMaxThreads(3 /* for example */);
    // call this for each client (currently this must be setup in
    // advance)
    sp<RpcConnection> connection = server->addClientConnection();
    // other server types are supported
    if (!connection->setupInetServer(1234 /*some port*/)) .. error ..
    // process requests for each client
    server->join();

And a client looks like this:

    sp<RpcConnection> connection = RpcConnection::make();
    if (!connection->setupInetClient(/*some IP address*/, 1234 /*some port*/))
        .. error ..
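
Once connected, the client grabs the root object from the connection and
uses it like any other binder. A rough sketch (IFoo and doSomething are
hypothetical placeholders for a real AIDL-generated interface):

    sp<IBinder> binder = connection->getRootObject();
    if (binder == nullptr) .. error ..
    // hypothetical interface, shown only for illustration
    sp<IFoo> foo = interface_cast<IFoo>(binder);
    foo->doSomething();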

The above code will create 3 threads on the server serving 3 separate
socket connections that the client can use to make up to 3 simultaneous
sets of synchronous calls (these can't be shared because the sockets may
be needed for binder calls).

This means that each address (IP + port) in this case can serve a
single process.
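
For example, continuing the hypothetical IFoo sketch above, a client
could drive the 3 connections from 3 threads at once:

    // each in-flight synchronous call occupies one of the 3 sockets
    // for its full duration
    std::vector<std::thread> threads;
    for (size_t i = 0; i < 3; i++) {
        threads.push_back(std::thread([&] { foo->doSomething(); }));
    }
    for (auto& t : threads) t.join();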

Future considerations:
- if we wanted, we could set up this connection dynamically, so that
  extra threads and sockets are only created as needed. This would be at
  parity with binder, but it also opens up the possibility of later
  errors. TODOs are added in the code for this.
- a single server should be able to share a threadpool between multiple
  clients. Currently a new threadpool is created for each client.
- new client connections should be able to be set up dynamically.
  Currently, once the threadpool is started, we don't support making
  more connections, but we should.

Bug: 185167543
Test: binderRpcTest
Change-Id: I4c11ab64bf7c1c19ca67f6a1c4be21de52358a5c
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index ce69ea2..f3ec904 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -298,8 +298,6 @@
     ProcessConnection createRpcTestSocketServerProcess(
             size_t numThreads,
             const std::function<void(const sp<RpcServer>&, const sp<RpcConnection>&)>& configure) {
-        CHECK_GT(numThreads, 0);
-
         SocketType socketType = GetParam();
 
         std::string addr = allocateSocketAddress();
@@ -312,6 +310,7 @@
                     sp<RpcServer> server = RpcServer::make();
 
                     server->iUnderstandThisCodeIsExperimentalAndIWillNotUseItInProduction();
+                    server->setMaxThreads(numThreads);
 
                     // server supporting one client on one socket
                     sp<RpcConnection> connection = server->addClientConnection();
@@ -339,13 +338,7 @@
 
                     configure(server, connection);
 
-                    // accept 'numThreads' connections
-                    std::vector<std::thread> pool;
-                    for (size_t i = 0; i + 1 < numThreads; i++) {
-                        pool.push_back(std::thread([=] { connection->join(); }));
-                    }
-                    connection->join();
-                    for (auto& t : pool) t.join();
+                    server->join();
                 }),
                 .connection = RpcConnection::make(),
         };
@@ -358,29 +351,26 @@
         }
 
         // create remainder of connections
-        for (size_t i = 0; i < numThreads; i++) {
-            for (size_t tries = 0; tries < 5; tries++) {
-                usleep(10000);
-                switch (socketType) {
-                    case SocketType::UNIX:
-                        if (ret.connection->addUnixDomainClient(addr.c_str())) goto success;
-                        break;
+        for (size_t tries = 0; tries < 10; tries++) {
+            usleep(10000);
+            switch (socketType) {
+                case SocketType::UNIX:
+                    if (ret.connection->setupUnixDomainClient(addr.c_str())) goto success;
+                    break;
 #ifdef __BIONIC__
-                    case SocketType::VSOCK:
-                        if (ret.connection->addVsockClient(VMADDR_CID_LOCAL, vsockPort))
-                            goto success;
-                        break;
+                case SocketType::VSOCK:
+                    if (ret.connection->setupVsockClient(VMADDR_CID_LOCAL, vsockPort)) goto success;
+                    break;
 #endif // __BIONIC__
-                    case SocketType::INET:
-                        if (ret.connection->addInetClient("127.0.0.1", inetPort)) goto success;
-                        break;
-                    default:
-                        LOG_ALWAYS_FATAL("Unknown socket type");
-                }
+                case SocketType::INET:
+                    if (ret.connection->setupInetClient("127.0.0.1", inetPort)) goto success;
+                    break;
+                default:
+                    LOG_ALWAYS_FATAL("Unknown socket type");
             }
-            LOG_ALWAYS_FATAL("Could not connect");
-        success:;
         }
+        LOG_ALWAYS_FATAL("Could not connect");
+    success:
 
         ret.rootBinder = ret.connection->getRootObject();
         return ret;