Merge "libbinder: do not always compute open ashmem size"
diff --git a/cmds/service/Android.bp b/cmds/service/Android.bp
index 3e8e3f6..21ac11b 100644
--- a/cmds/service/Android.bp
+++ b/cmds/service/Android.bp
@@ -52,3 +52,21 @@
         "-Werror",
     ],
 }
+
+cc_binary_host {
+    name: "aservice",
+
+    srcs: ["service.cpp"],
+
+    shared_libs: [
+        "libcutils",
+        "libutils",
+        "libbinder",
+    ],
+
+    cflags: [
+        "-DXP_UNIX",
+        "-Wall",
+        "-Werror",
+    ],
+}
diff --git a/cmds/service/service.cpp b/cmds/service/service.cpp
index 18b6b58..0b00c2d 100644
--- a/cmds/service/service.cpp
+++ b/cmds/service/service.cpp
@@ -50,6 +50,7 @@
 {
     if (service != nullptr) {
         Parcel data, reply;
+        data.markForBinder(service);
         status_t err = service->transact(IBinder::INTERFACE_TRANSACTION, data, &reply);
         if (err == NO_ERROR) {
             return reply.readString16();
@@ -96,6 +97,9 @@
 #ifdef VENDORSERVICES
     ProcessState::initWithDriver("/dev/vndbinder");
 #endif
+#ifndef __ANDROID__
+    setDefaultServiceManager(createRpcDelegateServiceManager({.maxOutgoingThreads = 1}));
+#endif
     sp<IServiceManager> sm = defaultServiceManager();
     fflush(stdout);
     if (sm == nullptr) {
@@ -138,6 +142,7 @@
                 int32_t code = atoi(argv[optind++]);
                 if (service != nullptr && ifName.size() > 0) {
                     Parcel data, reply;
+                    data.markForBinder(service);
 
                     // the interface name is first
                     data.writeInterfaceToken(ifName);
@@ -229,7 +234,7 @@
                             int afd = ashmem_create_region("test", statbuf.st_size);
                             void* ptr = mmap(NULL, statbuf.st_size,
                                    PROT_READ | PROT_WRITE, MAP_SHARED, afd, 0);
-                            read(fd, ptr, statbuf.st_size);
+                            (void)read(fd, ptr, statbuf.st_size);
                             close(fd);
                             data.writeFileDescriptor(afd, true /* take ownership */);
                         } else if (strcmp(argv[optind], "nfd") == 0) {
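The `fd` branch shown above packs a file into an anonymous shared-memory (ashmem) region and attaches that region's descriptor to the Parcel; the tool deliberately ignores I/O failures (hence the `(void)read` cast added here). The sketch below shows the same flow with error handling as a standalone helper; it is illustrative only and not part of this change.

```cpp
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <binder/Parcel.h>
#include <cutils/ashmem.h>
#include <utils/Errors.h>

// Copy a regular file into an ashmem region and attach the region's fd to a
// Parcel, checking each step. Returns false on any failure.
static bool writeFileAsAshmem(android::Parcel& data, const char* path) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return false;

    struct stat statbuf;
    if (fstat(fd, &statbuf) != 0) { close(fd); return false; }

    int afd = ashmem_create_region("test", statbuf.st_size);
    if (afd < 0) { close(fd); return false; }

    void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, afd, 0);
    if (ptr == MAP_FAILED) { close(fd); close(afd); return false; }

    bool ok = read(fd, ptr, statbuf.st_size) == static_cast<ssize_t>(statbuf.st_size);
    munmap(ptr, statbuf.st_size);
    close(fd);
    if (!ok) { close(afd); return false; }

    // On success the Parcel takes ownership of afd and will close it.
    return data.writeFileDescriptor(afd, true /* take ownership */) == android::OK;
}
```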
diff --git a/data/etc/Android.bp b/data/etc/Android.bp
index 235990a..5fe4ea1 100644
--- a/data/etc/Android.bp
+++ b/data/etc/Android.bp
@@ -185,6 +185,12 @@
 }
 
 prebuilt_etc {
+    name: "android.hardware.wifi.passpoint.prebuilt.xml",
+    src: "android.hardware.wifi.passpoint.xml",
+    defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
     name: "android.software.device_id_attestation.prebuilt.xml",
     src: "android.software.device_id_attestation.xml",
     defaults: ["frameworks_native_data_etc_defaults"],
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index aff9e0d..81e61da 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -448,21 +448,27 @@
 // on-device service manager.
 class ServiceManagerHostShim : public ServiceManagerShim {
 public:
-    using ServiceManagerShim::ServiceManagerShim;
+    ServiceManagerHostShim(const sp<AidlServiceManager>& impl,
+                           const RpcDelegateServiceManagerOptions& options)
+          : ServiceManagerShim(impl), mOptions(options) {}
     // ServiceManagerShim::getService is based on checkService, so no need to override it.
     sp<IBinder> checkService(const String16& name) const override {
-        return getDeviceService({String8(name).c_str()});
+        return getDeviceService({String8(name).c_str()}, mOptions);
     }
 
 protected:
     // Override realGetService for ServiceManagerShim::waitForService.
     Status realGetService(const std::string& name, sp<IBinder>* _aidl_return) {
-        *_aidl_return = getDeviceService({"-g", name});
+        *_aidl_return = getDeviceService({"-g", name}, mOptions);
         return Status::ok();
     }
+
+private:
+    RpcDelegateServiceManagerOptions mOptions;
 };
-sp<IServiceManager> createRpcDelegateServiceManager() {
-    auto binder = getDeviceService({"manager"});
+sp<IServiceManager> createRpcDelegateServiceManager(
+        const RpcDelegateServiceManagerOptions& options) {
+    auto binder = getDeviceService({"manager"}, options);
     if (binder == nullptr) {
         ALOGE("getDeviceService(\"manager\") returns null");
         return nullptr;
@@ -472,7 +478,7 @@
         ALOGE("getDeviceService(\"manager\") returns non service manager");
         return nullptr;
     }
-    return sp<ServiceManagerHostShim>::make(interface);
+    return sp<ServiceManagerHostShim>::make(interface, options);
 }
 #endif
 
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 44b588b..967b8e3 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -381,7 +381,7 @@
             } while (server->mSessions.end() != server->mSessions.find(sessionId));
 
             session = RpcSession::make();
-            session->setMaxThreads(server->mMaxThreads);
+            session->setMaxIncomingThreads(server->mMaxThreads);
             if (!session->setProtocolVersion(protocolVersion)) return;
             if (!session->setForServer(server,
                                        sp<RpcServer::EventListener>::fromExisting(
diff --git a/libs/binder/RpcSession.cpp b/libs/binder/RpcSession.cpp
index 4465b8e..9eef3e8 100644
--- a/libs/binder/RpcSession.cpp
+++ b/libs/binder/RpcSession.cpp
@@ -61,7 +61,7 @@
     LOG_RPC_DETAIL("RpcSession destroyed %p", this);
 
     std::lock_guard<std::mutex> _l(mMutex);
-    LOG_ALWAYS_FATAL_IF(mThreadState.mIncomingConnections.size() != 0,
+    LOG_ALWAYS_FATAL_IF(mConnections.mIncoming.size() != 0,
                         "Should not be able to destroy a session with servers in use.");
 }
 
@@ -76,20 +76,32 @@
     return sp<RpcSession>::make(std::move(ctx));
 }
 
-void RpcSession::setMaxThreads(size_t threads) {
+void RpcSession::setMaxIncomingThreads(size_t threads) {
     std::lock_guard<std::mutex> _l(mMutex);
-    LOG_ALWAYS_FATAL_IF(!mThreadState.mOutgoingConnections.empty() ||
-                                !mThreadState.mIncomingConnections.empty(),
-                        "Must set max threads before setting up connections, but has %zu client(s) "
-                        "and %zu server(s)",
-                        mThreadState.mOutgoingConnections.size(),
-                        mThreadState.mIncomingConnections.size());
-    mMaxThreads = threads;
+    LOG_ALWAYS_FATAL_IF(!mConnections.mOutgoing.empty() || !mConnections.mIncoming.empty(),
+                        "Must set max incoming threads before setting up connections, but has %zu "
+                        "client(s) and %zu server(s)",
+                        mConnections.mOutgoing.size(), mConnections.mIncoming.size());
+    mMaxIncomingThreads = threads;
 }
 
-size_t RpcSession::getMaxThreads() {
+size_t RpcSession::getMaxIncomingThreads() {
     std::lock_guard<std::mutex> _l(mMutex);
-    return mMaxThreads;
+    return mMaxIncomingThreads;
+}
+
+void RpcSession::setMaxOutgoingThreads(size_t threads) {
+    std::lock_guard<std::mutex> _l(mMutex);
+    LOG_ALWAYS_FATAL_IF(!mConnections.mOutgoing.empty() || !mConnections.mIncoming.empty(),
+                        "Must set max outgoing threads before setting up connections, but has %zu "
+                        "client(s) and %zu server(s)",
+                        mConnections.mOutgoing.size(), mConnections.mIncoming.size());
+    mMaxOutgoingThreads = threads;
+}
+
+size_t RpcSession::getMaxOutgoingThreads() {
+    std::lock_guard<std::mutex> _l(mMutex);
+    return mMaxOutgoingThreads;
 }
 
 bool RpcSession::setProtocolVersion(uint32_t version) {
@@ -197,7 +209,7 @@
         LOG_ALWAYS_FATAL_IF(mShutdownListener == nullptr, "Shutdown listener not installed");
         mShutdownListener->waitForShutdown(_l, sp<RpcSession>::fromExisting(this));
 
-        LOG_ALWAYS_FATAL_IF(!mThreadState.mThreads.empty(), "Shutdown failed");
+        LOG_ALWAYS_FATAL_IF(!mConnections.mThreads.empty(), "Shutdown failed");
     }
 
     _l.unlock();
@@ -263,11 +275,11 @@
 
 void RpcSession::WaitForShutdownListener::waitForShutdown(std::unique_lock<std::mutex>& lock,
                                                           const sp<RpcSession>& session) {
-    while (session->mThreadState.mIncomingConnections.size() > 0) {
+    while (session->mConnections.mIncoming.size() > 0) {
         if (std::cv_status::timeout == mCv.wait_for(lock, std::chrono::seconds(1))) {
             ALOGE("Waiting for RpcSession to shut down (1s w/o progress): %zu incoming connections "
                   "still.",
-                  session->mThreadState.mIncomingConnections.size());
+                  session->mConnections.mIncoming.size());
         }
     }
 }
@@ -277,7 +289,7 @@
 
     {
         std::lock_guard<std::mutex> _l(mMutex);
-        mThreadState.mThreads[thread.get_id()] = std::move(thread);
+        mConnections.mThreads[thread.get_id()] = std::move(thread);
     }
 }
 
@@ -380,10 +392,10 @@
     sp<RpcSession::EventListener> listener;
     {
         std::lock_guard<std::mutex> _l(session->mMutex);
-        auto it = session->mThreadState.mThreads.find(std::this_thread::get_id());
-        LOG_ALWAYS_FATAL_IF(it == session->mThreadState.mThreads.end());
+        auto it = session->mConnections.mThreads.find(std::this_thread::get_id());
+        LOG_ALWAYS_FATAL_IF(it == session->mConnections.mThreads.end());
         it->second.detach();
-        session->mThreadState.mThreads.erase(it);
+        session->mConnections.mThreads.erase(it);
 
         listener = session->mEventListener.promote();
     }
@@ -414,9 +426,9 @@
                                                               bool incoming)>& connectAndInit) {
     {
         std::lock_guard<std::mutex> _l(mMutex);
-        LOG_ALWAYS_FATAL_IF(mThreadState.mOutgoingConnections.size() != 0,
+        LOG_ALWAYS_FATAL_IF(mConnections.mOutgoing.size() != 0,
                             "Must only setup session once, but already has %zu clients",
-                            mThreadState.mOutgoingConnections.size());
+                            mConnections.mOutgoing.size());
     }
 
     if (auto status = initShutdownTrigger(); status != OK) return status;
@@ -439,7 +451,7 @@
         // downgrade again
         mProtocolVersion = oldProtocolVersion;
 
-        mThreadState = {};
+        mConnections = {};
     });
 
     if (status_t status = connectAndInit({}, false /*incoming*/); status != OK) return status;
@@ -475,6 +487,12 @@
         return status;
     }
 
+    size_t outgoingThreads = std::min(numThreadsAvailable, mMaxOutgoingThreads);
+    ALOGI_IF(outgoingThreads != numThreadsAvailable,
+             "Server hints client to start %zu outgoing threads, but client will only start %zu "
+             "because it is preconfigured to start at most %zu outgoing threads.",
+             numThreadsAvailable, outgoingThreads, mMaxOutgoingThreads);
+
     // TODO(b/189955605): we should add additional sessions dynamically
     // instead of all at once - the other side should be responsible for setting
     // up additional connections. We need to create at least one (unless 0 are
@@ -482,11 +500,14 @@
     // any requests at all.
 
     // we've already setup one client
-    for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
+    LOG_RPC_DETAIL("RpcSession::setupClient() instantiating %zu outgoing (server max: %zu) and %zu "
+                   "incoming threads",
+                   outgoingThreads, numThreadsAvailable, mMaxIncomingThreads);
+    for (size_t i = 0; i + 1 < outgoingThreads; i++) {
         if (status_t status = connectAndInit(mId, false /*incoming*/); status != OK) return status;
     }
 
-    for (size_t i = 0; i < mMaxThreads; i++) {
+    for (size_t i = 0; i < mMaxIncomingThreads; i++) {
         if (status_t status = connectAndInit(mId, true /*incoming*/); status != OK) return status;
     }
 
@@ -662,7 +683,7 @@
         std::lock_guard<std::mutex> _l(mMutex);
         connection->rpcTransport = std::move(rpcTransport);
         connection->exclusiveTid = gettid();
-        mThreadState.mOutgoingConnections.push_back(connection);
+        mConnections.mOutgoing.push_back(connection);
     }
 
     status_t status = OK;
@@ -699,9 +720,9 @@
         std::unique_ptr<RpcTransport> rpcTransport) {
     std::lock_guard<std::mutex> _l(mMutex);
 
-    if (mThreadState.mIncomingConnections.size() >= mMaxThreads) {
+    if (mConnections.mIncoming.size() >= mMaxIncomingThreads) {
         ALOGE("Cannot add thread to session with %zu threads (max is set to %zu)",
-              mThreadState.mIncomingConnections.size(), mMaxThreads);
+              mConnections.mIncoming.size(), mMaxIncomingThreads);
         return nullptr;
     }
 
@@ -709,7 +730,7 @@
     // happens when new connections are still being established as part of a
     // very short-lived session which shuts down after it already started
     // accepting new connections.
-    if (mThreadState.mIncomingConnections.size() < mThreadState.mMaxIncomingConnections) {
+    if (mConnections.mIncoming.size() < mConnections.mMaxIncoming) {
         return nullptr;
     }
 
@@ -717,19 +738,19 @@
     session->rpcTransport = std::move(rpcTransport);
     session->exclusiveTid = gettid();
 
-    mThreadState.mIncomingConnections.push_back(session);
-    mThreadState.mMaxIncomingConnections = mThreadState.mIncomingConnections.size();
+    mConnections.mIncoming.push_back(session);
+    mConnections.mMaxIncoming = mConnections.mIncoming.size();
 
     return session;
 }
 
 bool RpcSession::removeIncomingConnection(const sp<RpcConnection>& connection) {
     std::unique_lock<std::mutex> _l(mMutex);
-    if (auto it = std::find(mThreadState.mIncomingConnections.begin(),
-                            mThreadState.mIncomingConnections.end(), connection);
-        it != mThreadState.mIncomingConnections.end()) {
-        mThreadState.mIncomingConnections.erase(it);
-        if (mThreadState.mIncomingConnections.size() == 0) {
+    if (auto it =
+                std::find(mConnections.mIncoming.begin(), mConnections.mIncoming.end(), connection);
+        it != mConnections.mIncoming.end()) {
+        mConnections.mIncoming.erase(it);
+        if (mConnections.mIncoming.size() == 0) {
             sp<EventListener> listener = mEventListener.promote();
             if (listener) {
                 _l.unlock();
@@ -754,7 +775,7 @@
     pid_t tid = gettid();
     std::unique_lock<std::mutex> _l(session->mMutex);
 
-    session->mThreadState.mWaitingThreads++;
+    session->mConnections.mWaitingThreads++;
     while (true) {
         sp<RpcConnection> exclusive;
         sp<RpcConnection> available;
@@ -762,11 +783,11 @@
         // CHECK FOR DEDICATED CLIENT SOCKET
         //
         // A server/looper should always use a dedicated connection if available
-        findConnection(tid, &exclusive, &available, session->mThreadState.mOutgoingConnections,
-                       session->mThreadState.mOutgoingConnectionsOffset);
+        findConnection(tid, &exclusive, &available, session->mConnections.mOutgoing,
+                       session->mConnections.mOutgoingOffset);
 
         // WARNING: this assumes a server cannot request its client to send
-        // a transaction, as mIncomingConnections is excluded below.
+        // a transaction, as mIncoming is excluded below.
         //
         // Imagine we have more than one thread in play, and a single thread
         // sends a synchronous, then an asynchronous command. Imagine the
@@ -776,9 +797,8 @@
         // command. So, we move to considering the second available thread
         // for subsequent calls.
         if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
-            session->mThreadState.mOutgoingConnectionsOffset =
-                    (session->mThreadState.mOutgoingConnectionsOffset + 1) %
-                    session->mThreadState.mOutgoingConnections.size();
+            session->mConnections.mOutgoingOffset = (session->mConnections.mOutgoingOffset + 1) %
+                    session->mConnections.mOutgoing.size();
         }
 
         // USE SERVING SOCKET (e.g. nested transaction)
@@ -786,7 +806,7 @@
             sp<RpcConnection> exclusiveIncoming;
             // server connections are always assigned to a thread
             findConnection(tid, &exclusiveIncoming, nullptr /*available*/,
-                           session->mThreadState.mIncomingConnections, 0 /* index hint */);
+                           session->mConnections.mIncoming, 0 /* index hint */);
 
             // asynchronous calls cannot be nested, we currently allow ref count
             // calls to be nested (so that you can use this without having extra
@@ -815,20 +835,20 @@
             break;
         }
 
-        if (session->mThreadState.mOutgoingConnections.size() == 0) {
+        if (session->mConnections.mOutgoing.size() == 0) {
             ALOGE("Session has no client connections. This is required for an RPC server to make "
                   "any non-nested (e.g. oneway or on another thread) calls. Use: %d. Server "
                   "connections: %zu",
-                  static_cast<int>(use), session->mThreadState.mIncomingConnections.size());
+                  static_cast<int>(use), session->mConnections.mIncoming.size());
             return WOULD_BLOCK;
         }
 
         LOG_RPC_DETAIL("No available connections (have %zu clients and %zu servers). Waiting...",
-                       session->mThreadState.mOutgoingConnections.size(),
-                       session->mThreadState.mIncomingConnections.size());
+                       session->mConnections.mOutgoing.size(),
+                       session->mConnections.mIncoming.size());
         session->mAvailableConnectionCv.wait(_l);
     }
-    session->mThreadState.mWaitingThreads--;
+    session->mConnections.mWaitingThreads--;
 
     return OK;
 }
@@ -867,7 +887,7 @@
     if (!mReentrant && mConnection != nullptr) {
         std::unique_lock<std::mutex> _l(mSession->mMutex);
         mConnection->exclusiveTid = std::nullopt;
-        if (mSession->mThreadState.mWaitingThreads > 0) {
+        if (mSession->mConnections.mWaitingThreads > 0) {
             _l.unlock();
             mSession->mAvailableConnectionCv.notify_one();
         }
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index ef62f20..9ba64f3 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -855,7 +855,7 @@
 
             switch (transaction->code) {
                 case RPC_SPECIAL_TRANSACT_GET_MAX_THREADS: {
-                    replyStatus = reply.writeInt32(session->getMaxThreads());
+                    replyStatus = reply.writeInt32(session->getMaxIncomingThreads());
                     break;
                 }
                 case RPC_SPECIAL_TRANSACT_GET_SESSION_ID: {
diff --git a/libs/binder/ServiceManagerHost.cpp b/libs/binder/ServiceManagerHost.cpp
index 27cc563..194254a 100644
--- a/libs/binder/ServiceManagerHost.cpp
+++ b/libs/binder/ServiceManagerHost.cpp
@@ -124,7 +124,8 @@
 
 } // namespace
 
-sp<IBinder> getDeviceService(std::vector<std::string>&& serviceDispatcherArgs) {
+sp<IBinder> getDeviceService(std::vector<std::string>&& serviceDispatcherArgs,
+                             const RpcDelegateServiceManagerOptions& options) {
     std::vector<std::string> prefix{"adb", "shell", "servicedispatcher"};
     serviceDispatcherArgs.insert(serviceDispatcherArgs.begin(), prefix.begin(), prefix.end());
 
@@ -158,6 +159,10 @@
     LOG_ALWAYS_FATAL_IF(!forwardResult->hostPort().has_value());
 
     auto rpcSession = RpcSession::make();
+    if (options.maxOutgoingThreads.has_value()) {
+        rpcSession->setMaxOutgoingThreads(*options.maxOutgoingThreads);
+    }
+
     if (status_t status = rpcSession->setupInetClient("127.0.0.1", *forwardResult->hostPort());
         status != OK) {
         ALOGE("Unable to set up inet client on host port %u: %s", *forwardResult->hostPort(),
diff --git a/libs/binder/ServiceManagerHost.h b/libs/binder/ServiceManagerHost.h
index e59724c..c5310da 100644
--- a/libs/binder/ServiceManagerHost.h
+++ b/libs/binder/ServiceManagerHost.h
@@ -21,11 +21,14 @@
 
 namespace android {
 
+struct RpcDelegateServiceManagerOptions;
+
 // Get a service on device by running servicedispatcher with the given args, e.g.
 //     getDeviceService({"foo"});
 // Return nullptr on any error.
 // When the returned binder object is destroyed, the adb forwarding is removed and the
 // long-running servicedispatcher process is killed.
-sp<IBinder> getDeviceService(std::vector<std::string>&& serviceDispatcherArgs);
+sp<IBinder> getDeviceService(std::vector<std::string>&& serviceDispatcherArgs,
+                             const RpcDelegateServiceManagerOptions& options);
 
 } // namespace android
diff --git a/libs/binder/include/binder/IServiceManager.h b/libs/binder/include/binder/IServiceManager.h
index a48075d..240e3c2 100644
--- a/libs/binder/include/binder/IServiceManager.h
+++ b/libs/binder/include/binder/IServiceManager.h
@@ -188,7 +188,16 @@
 //        // ...
 //    }
 // Resources are cleaned up when the object is destroyed.
-sp<IServiceManager> createRpcDelegateServiceManager();
+//
+// For each returned binder object, at most |maxOutgoingThreads| outgoing threads are instantiated.
+// Hence, at most |maxOutgoingThreads| calls can be in flight simultaneously; additional calls
+// block until an outgoing thread becomes free. See RpcSession::setMaxOutgoingThreads.
+// If |maxOutgoingThreads| is not set, the default is |RpcSession::kDefaultMaxOutgoingThreads|.
+struct RpcDelegateServiceManagerOptions {
+    std::optional<size_t> maxOutgoingThreads;
+};
+sp<IServiceManager> createRpcDelegateServiceManager(
+        const RpcDelegateServiceManagerOptions& options);
 #endif
 
 } // namespace android
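The new options struct lets host-side callers bound how many outgoing RPC connections the delegate service manager opens per device service. A minimal host-only sketch of the intended usage, mirroring what the `aservice` tool and `binderHostDeviceTest` in this change do (the guard and the queried service name are illustrative):

```cpp
#include <binder/IBinder.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>

using namespace android;

int main() {
#ifndef __ANDROID__
    // Host build: route defaultServiceManager() through adb + servicedispatcher,
    // opening a single outgoing connection per delegated service.
    setDefaultServiceManager(createRpcDelegateServiceManager({.maxOutgoingThreads = 1}));
#endif
    sp<IServiceManager> sm = defaultServiceManager();
    if (sm == nullptr) return 1;

    // On the host this goes through ServiceManagerHostShim::checkService(),
    // which forwards the stored options to getDeviceService().
    sp<IBinder> binder = sm->checkService(String16("manager"));
    return binder != nullptr ? 0 : 1;
}
```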
diff --git a/libs/binder/include/binder/RpcSession.h b/libs/binder/include/binder/RpcSession.h
index 19888b7..f5505da 100644
--- a/libs/binder/include/binder/RpcSession.h
+++ b/libs/binder/include/binder/RpcSession.h
@@ -50,6 +50,8 @@
  */
 class RpcSession final : public virtual RefBase {
 public:
+    static constexpr size_t kDefaultMaxOutgoingThreads = 10;
+
     // Create an RpcSession with default configuration (raw sockets).
     static sp<RpcSession> make();
 
@@ -59,7 +61,7 @@
     static sp<RpcSession> make(std::unique_ptr<RpcTransportCtxFactory> rpcTransportCtxFactory);
 
     /**
-     * Set the maximum number of threads allowed to be made (for things like callbacks).
+     * Set the maximum number of incoming threads allowed to be made (for things like callbacks).
      * By default, this is 0. This must be called before setting up this connection as a client.
      * Server sessions will inherit this value from RpcServer.
      *
@@ -68,8 +70,20 @@
      *
      * TODO(b/189955605): start these dynamically
      */
-    void setMaxThreads(size_t threads);
-    size_t getMaxThreads();
+    void setMaxIncomingThreads(size_t threads);
+    size_t getMaxIncomingThreads();
+
+    /**
+     * Set the maximum number of outgoing threads allowed to be made.
+     * By default, this is |kDefaultMaxOutgoingThreads|. This must be called before setting up this
+     * connection as a client.
+     *
+     * This limits the number of outgoing threads on top of the remote peer setting. This RpcSession
+     * will only instantiate |min(maxOutgoingThreads, remoteMaxThreads)| outgoing threads, where
+     * |remoteMaxThreads| can be retrieved from the remote peer via |getRemoteMaxThreads()|.
+     */
+    void setMaxOutgoingThreads(size_t threads);
+    size_t getMaxOutgoingThreads();
 
     /**
      * By default, the minimum of the supported versions of the client and the
@@ -281,13 +295,13 @@
 
     const std::unique_ptr<RpcTransportCtx> mCtx;
 
-    // On the other side of a session, for each of mOutgoingConnections here, there should
-    // be one of mIncomingConnections on the other side (and vice versa).
+    // On the other side of a session, for each of mOutgoing here, there should
+    // be one of mIncoming on the other side (and vice versa).
     //
     // For the simplest session, a single server with one client, you would
     // have:
-    //  - the server has a single 'mIncomingConnections' and a thread listening on this
-    //  - the client has a single 'mOutgoingConnections' and makes calls to this
+    //  - the server has a single 'mIncoming' and a thread listening on this
+    //  - the client has a single 'mOutgoing' and makes calls to this
     //  - here, when the client makes a call, the server can call back into it
     //    (nested calls), but outside of this, the client will only ever read
     //    calls from the server when it makes a call itself.
@@ -307,7 +321,8 @@
 
     std::mutex mMutex; // for all below
 
-    size_t mMaxThreads = 0;
+    size_t mMaxIncomingThreads = 0;
+    size_t mMaxOutgoingThreads = kDefaultMaxOutgoingThreads;
     std::optional<uint32_t> mProtocolVersion;
 
     std::condition_variable mAvailableConnectionCv; // for mWaitingThreads
@@ -315,12 +330,12 @@
     struct ThreadState {
         size_t mWaitingThreads = 0;
         // hint index into clients, ++ when sending an async transaction
-        size_t mOutgoingConnectionsOffset = 0;
-        std::vector<sp<RpcConnection>> mOutgoingConnections;
-        size_t mMaxIncomingConnections = 0;
-        std::vector<sp<RpcConnection>> mIncomingConnections;
+        size_t mOutgoingOffset = 0;
+        std::vector<sp<RpcConnection>> mOutgoing;
+        size_t mMaxIncoming = 0;
+        std::vector<sp<RpcConnection>> mIncoming;
         std::map<std::thread::id, std::thread> mThreads;
-    } mThreadState;
+    } mConnections;
 };
 
 } // namespace android
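With the renamed setters, a client session configures the two directions independently: `setMaxIncomingThreads` bounds the connections dedicated to serving callbacks from the peer, while `setMaxOutgoingThreads` caps the connections opened toward the peer, the effective count being min(maxOutgoingThreads, remote max threads). A minimal client-side sketch, assuming an RPC server is already listening on 127.0.0.1:59999 (the address, port, and limits are illustrative):

```cpp
#include <binder/IBinder.h>
#include <binder/RpcSession.h>

using android::IBinder;
using android::OK;
using android::RpcSession;
using android::sp;

sp<IBinder> connectWithLimits() {
    sp<RpcSession> session = RpcSession::make();

    // One connection is reserved for incoming calls (callbacks) from the server.
    session->setMaxIncomingThreads(1);
    // Never open more than 4 outgoing connections, even if the server
    // advertises a larger thread pool.
    session->setMaxOutgoingThreads(4);

    if (session->setupInetClient("127.0.0.1", 59999) != OK) {
        return nullptr;
    }
    return session->getRootObject();
}
```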
diff --git a/libs/binder/rust/Android.bp b/libs/binder/rust/Android.bp
index d9d7caf..ecb044e 100644
--- a/libs/binder/rust/Android.bp
+++ b/libs/binder/rust/Android.bp
@@ -17,6 +17,7 @@
     rustlibs: [
         "liblibc",
         "libbinder_ndk_sys",
+        "libdowncast_rs",
     ],
     host_supported: true,
     target: {
@@ -133,6 +134,7 @@
     rustlibs: [
         "liblibc",
         "libbinder_ndk_sys",
+        "libdowncast_rs",
     ],
 }
 
diff --git a/libs/binder/rust/src/binder.rs b/libs/binder/rust/src/binder.rs
index 41ceee5..854b1f9 100644
--- a/libs/binder/rust/src/binder.rs
+++ b/libs/binder/rust/src/binder.rs
@@ -23,6 +23,7 @@
 
 use std::borrow::Borrow;
 use std::cmp::Ordering;
+use std::convert::TryFrom;
 use std::ffi::{c_void, CStr, CString};
 use std::fmt;
 use std::fs::File;
@@ -70,6 +71,7 @@
 /// An interface can promise to be a stable vendor interface ([`Vintf`]), or
 /// makes no stability guarantees ([`Local`]). [`Local`] is
 /// currently the default stability.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 pub enum Stability {
     /// Default stability, visible to other modules in the same compilation
     /// context (e.g. modules on system.img)
@@ -85,6 +87,28 @@
     }
 }
 
+impl From<Stability> for i32 {
+    fn from(stability: Stability) -> i32 {
+        use Stability::*;
+        match stability {
+            Local => 0,
+            Vintf => 1,
+        }
+    }
+}
+
+impl TryFrom<i32> for Stability {
+    type Error = StatusCode;
+    fn try_from(stability: i32) -> Result<Stability> {
+        use Stability::*;
+        match stability {
+            0 => Ok(Local),
+            1 => Ok(Vintf),
+            _ => Err(StatusCode::BAD_VALUE)
+        }
+    }
+}
+
 /// A local service that can be remotable via Binder.
 ///
 /// An object that implements this interface may be made into a Binder service
diff --git a/libs/binder/rust/src/lib.rs b/libs/binder/rust/src/lib.rs
index 7e8e3a5..d1d37d7 100644
--- a/libs/binder/rust/src/lib.rs
+++ b/libs/binder/rust/src/lib.rs
@@ -127,7 +127,7 @@
 
 /// The public API usable outside AIDL-generated interface crates.
 pub mod public_api {
-    pub use super::parcel::ParcelFileDescriptor;
+    pub use super::parcel::{ParcelFileDescriptor, ParcelableHolder};
     pub use super::{
         add_service, force_lazy_services_persist, get_interface, register_lazy_service,
         wait_for_interface,
diff --git a/libs/binder/rust/src/parcel.rs b/libs/binder/rust/src/parcel.rs
index dad89ec..7391561 100644
--- a/libs/binder/rust/src/parcel.rs
+++ b/libs/binder/rust/src/parcel.rs
@@ -29,11 +29,14 @@
 
 mod file_descriptor;
 mod parcelable;
+mod parcelable_holder;
 
 pub use self::file_descriptor::ParcelFileDescriptor;
 pub use self::parcelable::{
     Deserialize, DeserializeArray, DeserializeOption, Serialize, SerializeArray, SerializeOption,
+    Parcelable, NON_NULL_PARCELABLE_FLAG, NULL_PARCELABLE_FLAG,
 };
+pub use self::parcelable_holder::{ParcelableHolder, ParcelableMetadata};
 
 /// Container for a message (data and object references) that can be sent
 /// through Binder.
@@ -68,6 +71,21 @@
 }
 
 impl Parcel {
+    /// Create a new empty `Parcel`.
+    ///
+    /// Creates a new owned empty parcel that can be written to using the
+    /// serialization methods, or appended to from another parcel using
+    /// `append_from` and `append_all_from`.
+    pub fn new() -> Parcel {
+        let parcel = unsafe {
+            // Safety: If `AParcel_create` succeeds, it always returns
+            // a valid pointer. If it fails, the process will crash.
+            sys::AParcel_create()
+        };
+        assert!(!parcel.is_null());
+        Self::Owned(parcel)
+    }
+
     /// Create a borrowed reference to a parcel object from a raw pointer.
     ///
     /// # Safety
@@ -106,6 +124,22 @@
     }
 }
 
+impl Default for Parcel {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl Clone for Parcel {
+    fn clone(&self) -> Self {
+        let mut new_parcel = Self::new();
+        new_parcel
+            .append_all_from(self)
+            .expect("Failed to append from Parcel");
+        new_parcel
+    }
+}
+
 // Data serialization methods
 impl Parcel {
     /// Data written to parcelable is zero'd before being deleted or reallocated.
@@ -213,6 +247,30 @@
     pub unsafe fn set_data_position(&self, pos: i32) -> Result<()> {
         status_result(sys::AParcel_setDataPosition(self.as_native(), pos))
     }
+
+    /// Append a subset of another `Parcel`.
+    ///
+    /// This appends `size` bytes of data from `other` starting at offset
+    /// `start` to the current `Parcel`, or returns an error if not possible.
+    pub fn append_from(&mut self, other: &Self, start: i32, size: i32) -> Result<()> {
+        let status = unsafe {
+            // Safety: `Parcel::appendFrom` from C++ checks that `start`
+            // and `size` are in bounds, and returns an error otherwise.
+            // Both `self` and `other` always contain valid pointers.
+            sys::AParcel_appendFrom(
+                other.as_native(),
+                self.as_native_mut(),
+                start,
+                size,
+            )
+        };
+        status_result(status)
+    }
+
+    /// Append the contents of another `Parcel`.
+    pub fn append_all_from(&mut self, other: &Self) -> Result<()> {
+        self.append_from(other, 0, other.get_data_size())
+    }
 }
 
 /// A segment of a writable parcel, used for [`Parcel::sized_write`].
@@ -427,43 +485,9 @@
     }
 }
 
-#[cfg(test)]
-impl Parcel {
-    /// Create a new parcel tied to a bogus binder. TESTING ONLY!
-    ///
-    /// This can only be used for testing! All real parcel operations must be
-    /// done in the callback to [`IBinder::transact`] or in
-    /// [`Remotable::on_transact`] using the parcels provided to these methods.
-    pub(crate) fn new_for_test(binder: &mut SpIBinder) -> Result<Self> {
-        let mut input = ptr::null_mut();
-        let status = unsafe {
-            // Safety: `SpIBinder` guarantees that `binder` always contains a
-            // valid pointer to an `AIBinder`. We pass a valid, mutable out
-            // pointer to receive a newly constructed parcel. When successful
-            // this function assigns a new pointer to an `AParcel` to `input`
-            // and transfers ownership of this pointer to the caller. Thus,
-            // after this call, `input` will either be null or point to a valid,
-            // owned `AParcel`.
-            sys::AIBinder_prepareTransaction(binder.as_native_mut(), &mut input)
-        };
-        status_result(status)?;
-        unsafe {
-            // Safety: `input` is either null or a valid, owned pointer to an
-            // `AParcel`, so is valid to safe to
-            // `Parcel::owned`. `Parcel::owned` takes ownership of the parcel
-            // pointer.
-            Parcel::owned(input).ok_or(StatusCode::UNEXPECTED_NULL)
-        }
-    }
-}
-
 #[test]
 fn test_read_write() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-
-    let mut service = Binder::new(()).as_binder();
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     assert_eq!(parcel.read::<bool>(), Err(StatusCode::NOT_ENOUGH_DATA));
@@ -493,11 +517,7 @@
 #[test]
 #[allow(clippy::float_cmp)]
 fn test_read_data() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-
-    let mut service = Binder::new(()).as_binder();
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let str_start = parcel.get_data_position();
 
     parcel.write(&b"Hello, Binder!\0"[..]).unwrap();
@@ -572,11 +592,7 @@
 
 #[test]
 fn test_utf8_utf16_conversions() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-
-    let mut service = Binder::new(()).as_binder();
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     assert!(parcel.write("Hello, Binder!").is_ok());
@@ -636,11 +652,7 @@
 
 #[test]
 fn test_sized_write() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-
-    let mut service = Binder::new(()).as_binder();
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     let arr = [1i32, 2i32, 3i32];
@@ -668,3 +680,43 @@
         &arr,
     );
 }
+
+#[test]
+fn test_append_from() {
+    let mut parcel1 = Parcel::new();
+    parcel1.write(&42i32).expect("Could not perform write");
+
+    let mut parcel2 = Parcel::new();
+    assert_eq!(Ok(()), parcel2.append_all_from(&parcel1));
+    assert_eq!(4, parcel2.get_data_size());
+    assert_eq!(Ok(()), parcel2.append_all_from(&parcel1));
+    assert_eq!(8, parcel2.get_data_size());
+    unsafe {
+        parcel2.set_data_position(0).unwrap();
+    }
+    assert_eq!(Ok(42), parcel2.read::<i32>());
+    assert_eq!(Ok(42), parcel2.read::<i32>());
+
+    let mut parcel2 = Parcel::new();
+    assert_eq!(Ok(()), parcel2.append_from(&parcel1, 0, 2));
+    assert_eq!(Ok(()), parcel2.append_from(&parcel1, 2, 2));
+    assert_eq!(4, parcel2.get_data_size());
+    unsafe {
+        parcel2.set_data_position(0).unwrap();
+    }
+    assert_eq!(Ok(42), parcel2.read::<i32>());
+
+    let mut parcel2 = Parcel::new();
+    assert_eq!(Ok(()), parcel2.append_from(&parcel1, 0, 2));
+    assert_eq!(2, parcel2.get_data_size());
+    unsafe {
+        parcel2.set_data_position(0).unwrap();
+    }
+    assert_eq!(Err(StatusCode::NOT_ENOUGH_DATA), parcel2.read::<i32>());
+
+    let mut parcel2 = Parcel::new();
+    assert_eq!(Err(StatusCode::BAD_VALUE), parcel2.append_from(&parcel1, 4, 2));
+    assert_eq!(Err(StatusCode::BAD_VALUE), parcel2.append_from(&parcel1, 2, 4));
+    assert_eq!(Err(StatusCode::BAD_VALUE), parcel2.append_from(&parcel1, -1, 4));
+    assert_eq!(Err(StatusCode::BAD_VALUE), parcel2.append_from(&parcel1, 2, -1));
+}
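The new `Clone` impl above is built on `append_all_from`, so cloning an owned parcel copies its data bytes into a fresh owned parcel. A small additional test in the style of the ones in this file could exercise that path (a sketch, not part of this change; `Parcel` is already in scope in parcel.rs):

```rust
#[test]
fn test_clone_copies_data() {
    let mut parcel = Parcel::new();
    parcel.write(&7i32).unwrap();
    parcel.write(&8i32).unwrap();

    // Clone goes through append_all_from(), so the copy carries the same bytes.
    let cloned = parcel.clone();
    assert_eq!(parcel.get_data_size(), cloned.get_data_size());

    unsafe {
        // Safety: 0 is always a valid position.
        cloned.set_data_position(0).unwrap();
    }
    assert_eq!(Ok(7), cloned.read::<i32>());
    assert_eq!(Ok(8), cloned.read::<i32>());
}
```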
diff --git a/libs/binder/rust/src/parcel/parcelable.rs b/libs/binder/rust/src/parcel/parcelable.rs
index 56c6165..499ef09 100644
--- a/libs/binder/rust/src/parcel/parcelable.rs
+++ b/libs/binder/rust/src/parcel/parcelable.rs
@@ -14,19 +14,42 @@
  * limitations under the License.
  */
 
-use crate::binder::{AsNative, FromIBinder, Strong};
+use crate::binder::{AsNative, FromIBinder, Stability, Strong};
 use crate::error::{status_result, status_t, Result, Status, StatusCode};
 use crate::parcel::Parcel;
 use crate::proxy::SpIBinder;
 use crate::sys;
 
-use std::convert::TryInto;
+use std::convert::{TryFrom, TryInto};
 use std::ffi::c_void;
 use std::os::raw::{c_char, c_ulong};
 use std::mem::{self, MaybeUninit};
 use std::ptr;
 use std::slice;
 
+/// Super-trait for Binder parcelables.
+///
+/// This trait is equivalent to `android::Parcelable` in C++,
+/// and defines a common interface that all parcelables need
+/// to implement.
+pub trait Parcelable {
+    /// Internal serialization function for parcelables.
+    ///
+    /// This method is mainly for internal use.
+    /// `Serialize::serialize` and its variants are generally
+    /// preferred over this function, since they also
+    /// prepend a header.
+    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()>;
+
+    /// Internal deserialization function for parcelables.
+    ///
+    /// This method is mainly for internal use.
+    /// `Deserialize::deserialize` and its variants are generally
+    /// preferred over this function, since they also
+    /// parse the additional header.
+    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()>;
+}
+
 /// A struct whose instances can be written to a [`Parcel`].
 // Might be able to hook this up as a serde backend in the future?
 pub trait Serialize {
@@ -162,6 +185,18 @@
     StatusCode::OK as status_t
 }
 
+/// Flag that specifies that the following parcelable is present.
+///
+/// This is the Rust equivalent of `Parcel::kNonNullParcelableFlag`
+/// from `include/binder/Parcel.h` in C++.
+pub const NON_NULL_PARCELABLE_FLAG: i32 = 1;
+
+/// Flag that specifies that the following parcelable is absent.
+///
+/// This is the Rust equivalent of `Parcel::kNullParcelableFlag`
+/// from `include/binder/Parcel.h` in C++.
+pub const NULL_PARCELABLE_FLAG: i32 = 0;
+
 /// Helper trait for types that can be nullable when serialized.
 // We really need this trait instead of implementing `Serialize for Option<T>`
 // because of the Rust orphan rule which prevents us from doing
@@ -173,10 +208,10 @@
     /// Serialize an Option of this type into the given [`Parcel`].
     fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
         if let Some(inner) = this {
-            parcel.write(&1i32)?;
+            parcel.write(&NON_NULL_PARCELABLE_FLAG)?;
             parcel.write(inner)
         } else {
-            parcel.write(&0i32)
+            parcel.write(&NULL_PARCELABLE_FLAG)
         }
     }
 }
@@ -186,7 +221,7 @@
     /// Deserialize an Option of this type from the given [`Parcel`].
     fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
         let null: i32 = parcel.read()?;
-        if null == 0 {
+        if null == NULL_PARCELABLE_FLAG {
             Ok(None)
         } else {
             parcel.read().map(Some)
@@ -608,6 +643,18 @@
     }
 }
 
+impl Serialize for Stability {
+    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+        i32::from(*self).serialize(parcel)
+    }
+}
+
+impl Deserialize for Stability {
+    fn deserialize(parcel: &Parcel) -> Result<Self> {
+        i32::deserialize(parcel).and_then(Stability::try_from)
+    }
+}
+
 impl Serialize for Status {
     fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
         unsafe {
@@ -699,19 +746,53 @@
     }
 }
 
+/// Implement `Serialize` trait and friends for a parcelable
+///
+/// This is an internal macro used by the AIDL compiler to implement
+/// `Serialize`, `SerializeArray` and `SerializeOption` for
+/// structured parcelables. The target type must implement the
+/// `Parcelable` trait.
+#[macro_export]
+macro_rules! impl_serialize_for_parcelable {
+    ($parcelable:ident) => {
+        impl $crate::parcel::Serialize for $parcelable {
+            fn serialize(
+                &self,
+                parcel: &mut $crate::parcel::Parcel,
+            ) -> $crate::Result<()> {
+                <Self as $crate::parcel::SerializeOption>::serialize_option(
+                    Some(self),
+                    parcel,
+                )
+            }
+        }
+
+        impl $crate::parcel::SerializeArray for $parcelable {}
+
+        impl $crate::parcel::SerializeOption for $parcelable {
+            fn serialize_option(
+                this: Option<&Self>,
+                parcel: &mut $crate::parcel::Parcel,
+            ) -> $crate::Result<()> {
+                if let Some(this) = this {
+                    use $crate::parcel::Parcelable;
+                    parcel.write(&$crate::parcel::NON_NULL_PARCELABLE_FLAG)?;
+                    this.write_to_parcel(parcel)
+                } else {
+                    parcel.write(&$crate::parcel::NULL_PARCELABLE_FLAG)
+                }
+            }
+        }
+    }
+}
+
 /// Implement `Deserialize` trait and friends for a parcelable
 ///
 /// This is an internal macro used by the AIDL compiler to implement
 /// `Deserialize`, `DeserializeArray` and `DeserializeOption` for
-/// structured parcelables. The target type must implement a
-/// `deserialize_parcelable` method with the following signature:
-/// ```no_run
-/// fn deserialize_parcelable(
-///     &mut self,
-///     parcel: &binder::parcel::Parcelable,
-/// ) -> binder::Result<()> {
-///     // ...
-/// }
+/// structured parcelables. The target type must implement the
+/// `Parcelable` trait.
-/// ```
 #[macro_export]
 macro_rules! impl_deserialize_for_parcelable {
@@ -729,10 +810,11 @@
                 parcel: &$crate::parcel::Parcel,
             ) -> $crate::Result<()> {
                 let status: i32 = parcel.read()?;
-                if status == 0 {
+                if status == $crate::parcel::NULL_PARCELABLE_FLAG {
                     Err($crate::StatusCode::UNEXPECTED_NULL)
                 } else {
-                    self.deserialize_parcelable(parcel)
+                    use $crate::parcel::Parcelable;
+                    self.read_from_parcel(parcel)
                 }
             }
         }
@@ -752,12 +834,13 @@
                 parcel: &$crate::parcel::Parcel,
             ) -> $crate::Result<()> {
                 let status: i32 = parcel.read()?;
-                if status == 0 {
+                if status == $crate::parcel::NULL_PARCELABLE_FLAG {
                     *this = None;
                     Ok(())
                 } else {
+                    use $crate::parcel::Parcelable;
                     this.get_or_insert_with(Self::default)
-                        .deserialize_parcelable(parcel)
+                        .read_from_parcel(parcel)
                 }
             }
         }
@@ -790,10 +873,6 @@
 
 #[test]
 fn test_custom_parcelable() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-    let mut service = Binder::new(()).as_binder();
-
     struct Custom(u32, bool, String, Vec<String>);
 
     impl Serialize for Custom {
@@ -826,7 +905,7 @@
 
     let custom = Custom(123_456_789, true, string8, strs);
 
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     assert!(custom.serialize(&mut parcel).is_ok());
@@ -846,13 +925,9 @@
 #[test]
 #[allow(clippy::excessive_precision)]
 fn test_slice_parcelables() {
-    use crate::binder::Interface;
-    use crate::native::Binder;
-    let mut service = Binder::new(()).as_binder();
-
     let bools = [true, false, false, true];
 
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     assert!(bools.serialize(&mut parcel).is_ok());
@@ -876,7 +951,7 @@
 
     let u8s = [101u8, 255, 42, 117];
 
-    let mut parcel = Parcel::new_for_test(&mut service).unwrap();
+    let mut parcel = Parcel::new();
     let start = parcel.get_data_position();
 
     assert!(parcel.write(&u8s[..]).is_ok());
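Both macros assume the target type implements the new `Parcelable` trait. The following hand-written example, sketched as in-crate code, shows a hypothetical `MyData` type (not part of this change) wired up through `Parcelable` and the two macros, plus a direct round trip via the raw trait methods:

```rust
use crate::parcel::{Parcel, Parcelable};
use crate::{impl_deserialize_for_parcelable, impl_serialize_for_parcelable, Result};

#[derive(Debug, Default)]
struct MyData {
    num: i32,
    label: String,
}

impl Parcelable for MyData {
    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()> {
        parcel.write(&self.num)?;
        parcel.write(&self.label)
    }

    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()> {
        self.num = parcel.read()?;
        self.label = parcel.read()?;
        Ok(())
    }
}

// Generate the flag-prefixed Serialize/Deserialize impls (plus the array and
// option variants) on top of the raw Parcelable methods above.
impl_serialize_for_parcelable!(MyData);
impl_deserialize_for_parcelable!(MyData);

#[test]
fn test_my_data_roundtrip() {
    let data = MyData { num: 7, label: "hello".to_string() };

    let mut parcel = Parcel::new();
    data.write_to_parcel(&mut parcel).unwrap();

    unsafe {
        // Safety: 0 is always a valid position.
        parcel.set_data_position(0).unwrap();
    }

    let mut restored = MyData::default();
    restored.read_from_parcel(&parcel).unwrap();
    assert_eq!(restored.num, 7);
    assert_eq!(restored.label, "hello");
}
```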
diff --git a/libs/binder/rust/src/parcel/parcelable_holder.rs b/libs/binder/rust/src/parcel/parcelable_holder.rs
new file mode 100644
index 0000000..3e75d1b
--- /dev/null
+++ b/libs/binder/rust/src/parcel/parcelable_holder.rs
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use crate::binder::Stability;
+use crate::error::{Result, StatusCode};
+use crate::parcel::{Parcel, Parcelable};
+use crate::{impl_deserialize_for_parcelable, impl_serialize_for_parcelable};
+
+use downcast_rs::{impl_downcast, Downcast};
+use std::any::Any;
+use std::cell::RefCell;
+use std::rc::Rc;
+
+/// Metadata that `ParcelableHolder` needs for all parcelables.
+///
+/// The AIDL compiler auto-generates implementations of this trait
+/// for AIDL parcelables.
+pub trait ParcelableMetadata {
+    /// The Binder parcelable descriptor string.
+    ///
+    /// This string is a unique identifier for a Binder parcelable.
+    fn get_descriptor() -> &'static str;
+
+    /// The Binder parcelable stability.
+    fn get_stability(&self) -> Stability {
+        Stability::Local
+    }
+}
+
+trait AnyParcelable: Downcast + Parcelable + std::fmt::Debug {}
+impl_downcast!(AnyParcelable);
+impl<T> AnyParcelable for T where T: Downcast + Parcelable + std::fmt::Debug {}
+
+#[derive(Debug, Clone)]
+enum ParcelableHolderData {
+    Empty,
+    Parcelable {
+        parcelable: Rc<dyn AnyParcelable>,
+        name: String,
+    },
+    Parcel(Parcel),
+}
+
+impl Default for ParcelableHolderData {
+    fn default() -> Self {
+        ParcelableHolderData::Empty
+    }
+}
+
+/// A container that can hold any arbitrary `Parcelable`.
+///
+/// This type is currently used for AIDL parcelable fields.
+///
+/// `ParcelableHolder` is currently not thread-safe (neither
+/// `Send` nor `Sync`), mainly because it internally contains
+/// a `Parcel` which in turn is not thread-safe.
+#[derive(Debug, Default, Clone)]
+pub struct ParcelableHolder {
+    // This is a `RefCell` because of `get_parcelable`
+    // which takes `&self` for consistency with C++.
+    // We could make `get_parcelable` take a `&mut self`
+    // and get rid of the `RefCell` here for a performance
+    // improvement, but then callers would require a mutable
+    // `ParcelableHolder` even for that getter method.
+    data: RefCell<ParcelableHolderData>,
+    stability: Stability,
+}
+
+impl ParcelableHolder {
+    /// Construct a new `ParcelableHolder` with the given stability.
+    pub fn new(stability: Stability) -> Self {
+        Self {
+            data: RefCell::new(ParcelableHolderData::Empty),
+            stability,
+        }
+    }
+
+    /// Reset the contents of this `ParcelableHolder`.
+    ///
+    /// Note that this method does not reset the stability,
+    /// only the contents.
+    pub fn reset(&mut self) {
+        *self.data.get_mut() = ParcelableHolderData::Empty;
+        // We could also clear stability here, but C++ doesn't
+    }
+
+    /// Set the parcelable contained in this `ParcelableHolder`.
+    pub fn set_parcelable<T>(&mut self, p: Rc<T>) -> Result<()>
+    where
+        T: Any + Parcelable + ParcelableMetadata + std::fmt::Debug,
+    {
+        if self.stability > p.get_stability() {
+            return Err(StatusCode::BAD_VALUE);
+        }
+
+        *self.data.get_mut() = ParcelableHolderData::Parcelable {
+            parcelable: p,
+            name: T::get_descriptor().into(),
+        };
+
+        Ok(())
+    }
+
+    /// Retrieve the parcelable stored in this `ParcelableHolder`.
+    ///
+    /// This method attempts to retrieve the parcelable inside
+    /// the current object as a parcelable of type `T`.
+    /// The object is validated against `T` by checking that
+    /// its parcelable descriptor matches the one returned
+    /// by `T::get_descriptor()`.
+    ///
+    /// Returns one of the following:
+    /// * `Err(_)` in case of error
+    /// * `Ok(None)` if the holder is empty or the descriptor does not match
+    /// * `Ok(Some(_))` if the object holds a parcelable of type `T`
+    ///   with the correct descriptor
+    pub fn get_parcelable<T>(&self) -> Result<Option<Rc<T>>>
+    where
+        T: Any + Parcelable + ParcelableMetadata + Default + std::fmt::Debug,
+    {
+        let parcelable_desc = T::get_descriptor();
+        let mut data = self.data.borrow_mut();
+        match *data {
+            ParcelableHolderData::Empty => Ok(None),
+            ParcelableHolderData::Parcelable {
+                ref parcelable,
+                ref name,
+            } => {
+                if name != parcelable_desc {
+                    return Err(StatusCode::BAD_VALUE);
+                }
+
+                match Rc::clone(parcelable).downcast_rc::<T>() {
+                    Err(_) => Err(StatusCode::BAD_VALUE),
+                    Ok(x) => Ok(Some(x)),
+                }
+            }
+            ParcelableHolderData::Parcel(ref parcel) => {
+                unsafe {
+                    // Safety: 0 should always be a valid position.
+                    parcel.set_data_position(0)?;
+                }
+
+                let name: String = parcel.read()?;
+                if name != parcelable_desc {
+                    return Ok(None);
+                }
+
+                let mut parcelable = T::default();
+                parcelable.read_from_parcel(parcel)?;
+
+                let parcelable = Rc::new(parcelable);
+                let result = Rc::clone(&parcelable);
+                *data = ParcelableHolderData::Parcelable { parcelable, name };
+
+                Ok(Some(result))
+            }
+        }
+    }
+
+    /// Return the stability value of this object.
+    pub fn get_stability(&self) -> Stability {
+        self.stability
+    }
+}
+
+impl_serialize_for_parcelable!(ParcelableHolder);
+impl_deserialize_for_parcelable!(ParcelableHolder);
+
+impl Parcelable for ParcelableHolder {
+    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()> {
+        parcel.write(&self.stability)?;
+
+        match *self.data.borrow() {
+            ParcelableHolderData::Empty => parcel.write(&0i32),
+            ParcelableHolderData::Parcelable {
+                ref parcelable,
+                ref name,
+            } => {
+                let length_start = parcel.get_data_position();
+                parcel.write(&0i32)?;
+
+                let data_start = parcel.get_data_position();
+                parcel.write(name)?;
+                parcelable.write_to_parcel(parcel)?;
+
+                let end = parcel.get_data_position();
+                unsafe {
+                    // Safety: we got the position from `get_data_position`.
+                    parcel.set_data_position(length_start)?;
+                }
+
+                assert!(end >= data_start);
+                parcel.write(&(end - data_start))?;
+                unsafe {
+                    // Safety: we got the position from `get_data_position`.
+                    parcel.set_data_position(end)?;
+                }
+
+                Ok(())
+            }
+            ParcelableHolderData::Parcel(ref p) => {
+                parcel.write(&p.get_data_size())?;
+                parcel.append_all_from(p)
+            }
+        }
+    }
+
+    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()> {
+        self.stability = parcel.read()?;
+
+        let data_size: i32 = parcel.read()?;
+        if data_size < 0 {
+            // C++ returns BAD_VALUE here,
+            // while Java returns ILLEGAL_ARGUMENT
+            return Err(StatusCode::BAD_VALUE);
+        }
+        if data_size == 0 {
+            *self.data.get_mut() = ParcelableHolderData::Empty;
+            return Ok(());
+        }
+
+        // TODO: C++ ParcelableHolder accepts sizes up to SIZE_MAX here, but we
+        // only go up to i32::MAX because that's what our API uses everywhere
+        let data_start = parcel.get_data_position();
+        let data_end = data_start
+            .checked_add(data_size)
+            .ok_or(StatusCode::BAD_VALUE)?;
+
+        let mut new_parcel = Parcel::new();
+        new_parcel.append_from(parcel, data_start, data_size)?;
+        *self.data.get_mut() = ParcelableHolderData::Parcel(new_parcel);
+
+        unsafe {
+            // Safety: `append_from` checks if `data_size` overflows
+            // `parcel` and returns `BAD_VALUE` if that happens. We also
+            // explicitly check for negative and zero `data_size` above,
+            // so `data_end` is guaranteed to be greater than `data_start`.
+            parcel.set_data_position(data_end)?;
+        }
+
+        Ok(())
+    }
+}
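To make the holder's two storage states concrete, here is a hedged in-crate sketch using a hypothetical `MyExt` parcelable (descriptor and fields are illustrative, not part of this change). `set_parcelable` stores the typed value, serializing through `write_to_parcel` turns it into raw parcel data, and `get_parcelable` checks the recorded descriptor before deserializing it back out:

```rust
use std::rc::Rc;

use crate::binder::Stability;
use crate::parcel::{Parcel, Parcelable, ParcelableHolder, ParcelableMetadata};
use crate::Result;

#[derive(Debug, Default)]
struct MyExt {
    value: i32,
}

impl Parcelable for MyExt {
    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()> {
        parcel.write(&self.value)
    }

    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()> {
        self.value = parcel.read()?;
        Ok(())
    }
}

impl ParcelableMetadata for MyExt {
    fn get_descriptor() -> &'static str {
        "com.example.MyExt"
    }
}

#[test]
fn test_parcelable_holder_roundtrip() {
    // Store a typed parcelable; its descriptor is recorded alongside it.
    let mut holder = ParcelableHolder::new(Stability::Local);
    holder.set_parcelable(Rc::new(MyExt { value: 42 })).unwrap();

    // Write the holder out and read it back into a fresh holder, which then
    // holds the value only as raw parcel data.
    let mut parcel = Parcel::new();
    holder.write_to_parcel(&mut parcel).unwrap();
    unsafe {
        // Safety: 0 is always a valid position.
        parcel.set_data_position(0).unwrap();
    }
    let mut restored = ParcelableHolder::new(Stability::Local);
    restored.read_from_parcel(&parcel).unwrap();

    // get_parcelable() re-checks the stored descriptor before deserializing.
    let ext = restored
        .get_parcelable::<MyExt>()
        .unwrap()
        .expect("descriptor should match");
    assert_eq!(42, ext.value);
}
```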
diff --git a/libs/binder/tests/binderHostDeviceTest.cpp b/libs/binder/tests/binderHostDeviceTest.cpp
index eec3b44..464da60 100644
--- a/libs/binder/tests/binderHostDeviceTest.cpp
+++ b/libs/binder/tests/binderHostDeviceTest.cpp
@@ -65,7 +65,9 @@
 
 void initHostRpcServiceManagerOnce() {
     static std::once_flag gSmOnce;
-    std::call_once(gSmOnce, [] { setDefaultServiceManager(createRpcDelegateServiceManager()); });
+    std::call_once(gSmOnce, [] {
+        setDefaultServiceManager(createRpcDelegateServiceManager({.maxOutgoingThreads = 1}));
+    });
 }
 
 // Test for host service manager.
diff --git a/libs/binder/tests/binderRpcTest.cpp b/libs/binder/tests/binderRpcTest.cpp
index a1058bc..d68c6ff 100644
--- a/libs/binder/tests/binderRpcTest.cpp
+++ b/libs/binder/tests/binderRpcTest.cpp
@@ -481,6 +481,7 @@
         size_t numThreads = 1;
         size_t numSessions = 1;
         size_t numIncomingConnections = 0;
+        size_t numOutgoingConnections = SIZE_MAX;
     };
 
     static inline std::string PrintParamInfo(const testing::TestParamInfo<ParamType>& info) {
@@ -613,7 +614,8 @@
         status_t status;
 
         for (const auto& session : sessions) {
-            session->setMaxThreads(options.numIncomingConnections);
+            session->setMaxIncomingThreads(options.numIncomingConnections);
+            session->setMaxOutgoingThreads(options.numOutgoingConnections);
 
             switch (socketType) {
                 case SocketType::PRECONNECTED:
@@ -655,6 +657,9 @@
 
         return ret;
     }
+
+    void testThreadPoolOverSaturated(sp<IBinderRpcTest> iface, size_t numCalls,
+                                     size_t sleepMs = 500);
 };
 
 TEST_P(BinderRpc, Ping) {
@@ -996,28 +1001,39 @@
     for (auto& t : ts) t.join();
 }
 
-TEST_P(BinderRpc, ThreadPoolOverSaturated) {
-    constexpr size_t kNumThreads = 10;
-    constexpr size_t kNumCalls = kNumThreads + 3;
-    constexpr size_t kSleepMs = 500;
-
-    auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
-
+void BinderRpc::testThreadPoolOverSaturated(sp<IBinderRpcTest> iface, size_t numCalls,
+                                            size_t sleepMs) {
     size_t epochMsBefore = epochMillis();
 
     std::vector<std::thread> ts;
-    for (size_t i = 0; i < kNumCalls; i++) {
-        ts.push_back(std::thread([&] { proc.rootIface->sleepMs(kSleepMs); }));
+    for (size_t i = 0; i < numCalls; i++) {
+        ts.push_back(std::thread([&] { iface->sleepMs(sleepMs); }));
     }
 
     for (auto& t : ts) t.join();
 
     size_t epochMsAfter = epochMillis();
 
-    EXPECT_GE(epochMsAfter, epochMsBefore + 2 * kSleepMs);
+    EXPECT_GE(epochMsAfter, epochMsBefore + 2 * sleepMs);
 
     // Potential flake, but make sure calls are handled in parallel.
-    EXPECT_LE(epochMsAfter, epochMsBefore + 3 * kSleepMs);
+    EXPECT_LE(epochMsAfter, epochMsBefore + 3 * sleepMs);
+}
+
+TEST_P(BinderRpc, ThreadPoolOverSaturated) {
+    constexpr size_t kNumThreads = 10;
+    constexpr size_t kNumCalls = kNumThreads + 3;
+    auto proc = createRpcTestSocketServerProcess({.numThreads = kNumThreads});
+    testThreadPoolOverSaturated(proc.rootIface, kNumCalls);
+}
+
+TEST_P(BinderRpc, ThreadPoolLimitOutgoing) {
+    constexpr size_t kNumThreads = 20;
+    constexpr size_t kNumOutgoingConnections = 10;
+    constexpr size_t kNumCalls = kNumOutgoingConnections + 3;
+    auto proc = createRpcTestSocketServerProcess(
+            {.numThreads = kNumThreads, .numOutgoingConnections = kNumOutgoingConnections});
+    testThreadPoolOverSaturated(proc.rootIface, kNumCalls);
 }
 
 TEST_P(BinderRpc, ThreadingStressTest) {