Add test for async transaction queueing.
Previously, pending async transactions would be
queued to the thread todo list. For clients using
the poll interface, this meant the next outbound
call could pick up the pending transaction,
leading to undesired reentrant behavior.
This test verifies that pending async transactions
are always moved to the proc todo list instead, so
they can't be handled in the middle of another
transaction.
Bug: 38201220
Bug: 63075553
Bug: 63079216
Test: BinderLibTest.OnewayQueueing passes (with kernel fix)
Change-Id: I4bf5b31454912a7cb35f1a28628b02a6406a7ddf
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index c8e478a..218b37e 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -28,6 +28,8 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
+#include <sys/epoll.h>
+
#define ARRAY_SIZE(array) (sizeof array / sizeof array[0])
using namespace android;
@@ -50,8 +52,10 @@
BINDER_LIB_TEST_NOP_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
BINDER_LIB_TEST_REGISTER_SERVER,
BINDER_LIB_TEST_ADD_SERVER,
+ BINDER_LIB_TEST_ADD_POLL_SERVER,
BINDER_LIB_TEST_CALL_BACK,
BINDER_LIB_TEST_CALL_BACK_VERIFY_BUF,
+ BINDER_LIB_TEST_DELAYED_CALL_BACK,
BINDER_LIB_TEST_NOP_CALL_BACK,
BINDER_LIB_TEST_GET_SELF_TRANSACTION,
BINDER_LIB_TEST_GET_ID_TRANSACTION,
@@ -68,7 +72,7 @@
BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
};
-pid_t start_server_process(int arg2)
+pid_t start_server_process(int arg2, bool usePoll = false)
{
int ret;
pid_t pid;
@@ -76,11 +80,13 @@
int pipefd[2];
char stri[16];
char strpipefd1[16];
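+ // Holds "0" or "1" (plus NUL): tells the child whether to use the
+ // poll interface.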
+ char usepoll[2];
char *childargv[] = {
binderservername,
binderserverarg,
stri,
strpipefd1,
+ usepoll,
binderserversuffix,
NULL
};
@@ -91,6 +97,7 @@
snprintf(stri, sizeof(stri), "%d", arg2);
snprintf(strpipefd1, sizeof(strpipefd1), "%d", pipefd[1]);
+ snprintf(usepoll, sizeof(usepoll), "%d", usePoll ? 1 : 0);
pid = fork();
if (pid == -1)
@@ -175,14 +182,14 @@
virtual void TearDown() {
}
protected:
- sp<IBinder> addServer(int32_t *idPtr = NULL)
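+ // Shared helper for addServer()/addPollServer(): sends the given
+ // transaction code to the main server, which forks a new server
+ // process, and returns the new server's binder.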
+ sp<IBinder> addServerEtc(int32_t *idPtr, int code)
{
int ret;
int32_t id;
Parcel data, reply;
sp<IBinder> binder;
- ret = m_server->transact(BINDER_LIB_TEST_ADD_SERVER, data, &reply);
+ ret = m_server->transact(code, data, &reply);
EXPECT_EQ(NO_ERROR, ret);
EXPECT_FALSE(binder != NULL);
@@ -194,6 +201,17 @@
*idPtr = id;
return binder;
}
+
+ sp<IBinder> addServer(int32_t *idPtr = NULL)
+ {
+ return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_SERVER);
+ }
+
+ sp<IBinder> addPollServer(int32_t *idPtr = NULL)
+ {
+ return addServerEtc(idPtr, BINDER_LIB_TEST_ADD_POLL_SERVER);
+ }
+
void waitForReadData(int fd, int timeout_ms) {
int ret;
pollfd pfd = pollfd();
@@ -844,6 +862,42 @@
EXPECT_EQ(NO_ERROR, ret);
}
+TEST_F(BinderLibTest, OnewayQueueing)
+{
+ status_t ret;
+ Parcel data, data2;
+
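+ // Spawn a server process that handles binder commands through the
+ // poll interface (see run_server) instead of joinThreadPool().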
+ sp<IBinder> pollServer = addPollServer();
+
+ sp<BinderLibTestCallBack> callBack = new BinderLibTestCallBack();
+ data.writeStrongBinder(callBack);
+ data.writeInt32(500000); // delay in us before calling back
+
+ sp<BinderLibTestCallBack> callBack2 = new BinderLibTestCallBack();
+ data2.writeStrongBinder(callBack2);
+ data2.writeInt32(0); // delay in us
+
+ ret = pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data, NULL, TF_ONE_WAY);
+ EXPECT_EQ(NO_ERROR, ret);
+
+ // The delay ensures that this second transaction will end up on the async_todo list
+ // (for a single-threaded server)
+ ret = pollServer->transact(BINDER_LIB_TEST_DELAYED_CALL_BACK, data2, NULL, TF_ONE_WAY);
+ EXPECT_EQ(NO_ERROR, ret);
+
+ // The server verifies that the two transactions are handled in the
+ // expected order; if they are not, an error is reported through the
+ // callbacks.
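+
+ // Wait up to 2 seconds for each callback and check that it reported
+ // success.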
+ ret = callBack->waitEvent(2);
+ EXPECT_EQ(NO_ERROR, ret);
+ ret = callBack->getResult();
+ EXPECT_EQ(NO_ERROR, ret);
+
+ ret = callBack2->waitEvent(2);
+ EXPECT_EQ(NO_ERROR, ret);
+ ret = callBack2->getResult();
+ EXPECT_EQ(NO_ERROR, ret);
+}
+
class BinderLibTestService : public BBinder
{
public:
@@ -851,6 +905,7 @@
: m_id(id)
, m_nextServerId(id + 1)
, m_serverStartRequested(false)
+ , m_callback(NULL)
{
pthread_mutex_init(&m_serverWaitMutex, NULL);
pthread_cond_init(&m_serverWaitCond, NULL);
@@ -859,6 +914,16 @@
{
exit(EXIT_SUCCESS);
}
+
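+ // Called from the epoll loop in run_server(): issues the oneway
+ // callback stored by BINDER_LIB_TEST_DELAYED_CALL_BACK, then clears
+ // it.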
+ void processPendingCall() {
+ if (m_callback != NULL) {
+ Parcel data;
+ data.writeInt32(NO_ERROR);
+ m_callback->transact(BINDER_LIB_TEST_CALL_BACK, data, NULL, TF_ONE_WAY);
+ m_callback = NULL;
+ }
+ }
+
virtual status_t onTransact(uint32_t code,
const Parcel& data, Parcel* reply,
uint32_t flags = 0) {
@@ -890,6 +955,7 @@
pthread_mutex_unlock(&m_serverWaitMutex);
return NO_ERROR;
}
+ case BINDER_LIB_TEST_ADD_POLL_SERVER:
case BINDER_LIB_TEST_ADD_SERVER: {
int ret;
uint8_t buf[1] = { 0 };
@@ -904,9 +970,10 @@
} else {
serverid = m_nextServerId++;
m_serverStartRequested = true;
+ bool usePoll = code == BINDER_LIB_TEST_ADD_POLL_SERVER;
pthread_mutex_unlock(&m_serverWaitMutex);
- ret = start_server_process(serverid);
+ ret = start_server_process(serverid, usePoll);
pthread_mutex_lock(&m_serverWaitMutex);
}
if (ret > 0) {
@@ -934,6 +1001,42 @@
}
case BINDER_LIB_TEST_NOP_TRANSACTION:
return NO_ERROR;
+ case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
+ // Note: this transaction is only designed for use with a
+ // poll() server. See comments around epoll_wait().
+ if (m_callback != NULL) {
+ // A callback was already pending; this means that
+ // we received a second call while still processing
+ // the first one. Fail the test.
+ sp<IBinder> callback = data.readStrongBinder();
+ Parcel data2;
+ data2.writeInt32(UNKNOWN_ERROR);
+
+ callback->transact(BINDER_LIB_TEST_CALL_BACK, data2, NULL, TF_ONE_WAY);
+ } else {
+ m_callback = data.readStrongBinder();
+ int32_t delayUs = data.readInt32();
+ /*
+ * It's necessary that we sleep here, so the next
+ * transaction the caller makes will be queued to
+ * the async queue.
+ */
+ usleep(delayUs);
+
+ /*
+ * Now when we return, libbinder will tell the kernel
+ * we are done with this transaction, and the kernel
+ * can move the queued transaction to either the
+ * thread todo worklist (for kernels without the fix),
+ * or the proc todo worklist. In case of the former,
+ * the next outbound call will pick up the pending
+ * transaction, which leads to undesired reentrant
+ * behavior. This is caught in the if() branch above.
+ */
+ }
+
+ return NO_ERROR;
+ }
case BINDER_LIB_TEST_NOP_CALL_BACK: {
Parcel data2, reply2;
sp<IBinder> binder;
@@ -1083,16 +1186,26 @@
bool m_serverStartRequested;
sp<IBinder> m_serverStarted;
sp<IBinder> m_strongRef;
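+ // Callback stored by BINDER_LIB_TEST_DELAYED_CALL_BACK and issued
+ // later by processPendingCall() from the epoll loop.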
+ sp<IBinder> m_callback;
};
-int run_server(int index, int readypipefd)
+int run_server(int index, int readypipefd, bool usePoll)
{
binderLibTestServiceName += String16(binderserversuffix);
status_t ret;
sp<IServiceManager> sm = defaultServiceManager();
+ BinderLibTestService* testServicePtr;
{
sp<BinderLibTestService> testService = new BinderLibTestService(index);
+ /*
+ * We need this below, but can't hold a sp<> because it prevents the
+ * node from being cleaned up automatically. It's safe in this case
+ * because of how the tests are written.
+ */
+ testServicePtr = testService.get();
+
if (index == 0) {
ret = sm->addService(binderLibTestServiceName, testService);
} else {
@@ -1110,8 +1223,53 @@
if (ret)
return 1;
//printf("%s: joinThreadPool\n", __func__);
- ProcessState::self()->startThreadPool();
- IPCThreadState::self()->joinThreadPool();
+ if (usePoll) {
+ int fd;
+ struct epoll_event ev;
+ int epoll_fd;
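+
+ // setupPolling() queues BC_ENTER_LOOPER and returns the binder
+ // driver fd, so we can wait for work with epoll instead of
+ // blocking in joinThreadPool().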
+ IPCThreadState::self()->setupPolling(&fd);
+ if (fd < 0) {
+ return 1;
+ }
+ IPCThreadState::self()->flushCommands(); // flush BC_ENTER_LOOPER
+
+ epoll_fd = epoll_create1(0);
+ if (epoll_fd == -1) {
+ return 1;
+ }
+
+ ev.events = EPOLLIN;
+ if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) == -1) {
+ return 1;
+ }
+
+ while (1) {
+ /*
+ * We simulate a single-threaded process using the binder poll
+ * interface; besides handling binder commands, it can also
+ * issue outgoing transactions, by storing a callback in
+ * m_callback.
+ *
+ * processPendingCall() will then issue that transaction.
+ */
+ struct epoll_event events[1];
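+ // epoll_wait() returns after at most 1s even if no binder work
+ // arrives, so the loop never blocks indefinitely.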
+ int numEvents = epoll_wait(epoll_fd, events, 1, 1000);
+ if (numEvents < 0) {
+ if (errno == EINTR) {
+ continue;
+ }
+ return 1;
+ }
+ if (numEvents > 0) {
+ IPCThreadState::self()->handlePolledCommands();
+ IPCThreadState::self()->flushCommands(); // flush BC_FREE_BUFFER
+ testServicePtr->processPendingCall();
+ }
+ }
+ } else {
+ ProcessState::self()->startThreadPool();
+ IPCThreadState::self()->joinThreadPool();
+ }
//printf("%s: joinThreadPool returned\n", __func__);
return 1; /* joinThreadPool should not return */
}
@@ -1125,9 +1283,9 @@
binderservername = argv[0];
}
- if (argc == 5 && !strcmp(argv[1], binderserverarg)) {
- binderserversuffix = argv[4];
- return run_server(atoi(argv[2]), atoi(argv[3]));
+ if (argc == 6 && !strcmp(argv[1], binderserverarg)) {
+ binderserversuffix = argv[5];
+ return run_server(atoi(argv[2]), atoi(argv[3]), atoi(argv[4]) == 1);
}
binderserversuffix = new char[16];
snprintf(binderserversuffix, 16, "%d", getpid());