This test is used to check whether the kernel scheduler
1. supports priority inheritance.
2. utilizes the sync flag.
It also gathers statistics on average, best, and worst latency,
as well as the number of deadline misses against a configurable
deadline specified in us.
The output is in JSON format to make it easy to integrate
with an existing test framework.
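Example output (illustrative numbers):
{
"cfg":{"pair":1,"iterations":100,"deadline_us":2500},
"P0":{"SYNC":"GOOD","S":200,"I":200,"R":1,
 "other_ms":{ "avg":0.21 , "wst":1.85 , "bst":0.12 , "miss":0},
 "fifo_ms": { "avg":0.16 , "wst":1.42 , "bst":0.09 , "miss":0}
},
"inheritance": "PASS"
}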
usage:
./schd-dbg # test and output json
./schd-dbg -i 1 # number of iterations
./schd-dbg -pair 4 # number of process pairs
./schd-dbg -deadline_us 2500 # deadline in us
./schd-dbg -v # debug
Test: pixel
Change-Id: Idccf0d402dcd7b81c639020364b5ee32075e18cb
diff --git a/libs/binder/tests/schd-dbg.cpp b/libs/binder/tests/schd-dbg.cpp
new file mode 100644
index 0000000..2732071
--- /dev/null
+++ b/libs/binder/tests/schd-dbg.cpp
@@ -0,0 +1,426 @@
+#include <binder/Binder.h>
+#include <binder/IBinder.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+#include <algorithm>
+#include <chrono>
+#include <iomanip>
+#include <iostream>
+#include <tuple>
+#include <vector>
+
+#include <pthread.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+using namespace std;
+using namespace android;
+
+enum BinderWorkerServiceCode {
+ BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+#define ASSERT(cond) \
+ do { \
+ if (!(cond)) { \
+ cerr << __func__ << ":" << __LINE__ << " condition:" << #cond \
+           << " failed" << endl;                                          \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
+
+vector<sp<IBinder> > workers;
+
+// a sync ratio (the fraction of transactions in which the service ran
+// on the same cpu as its caller) above GOOD_SYNC_MIN is considered good
+#define GOOD_SYNC_MIN (0.6)
+
+#define DUMP_PRECISION 3
+
+// the default values
+int no_process = 2;
+int iterations = 100;
+int payload_size = 16;
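+// number of transactions where the callee did not inherit the caller's
+// priority, and where the callee ran on a different cpu, respectively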
+int no_inherent = 0;
+int no_sync = 0;
+int verbose = 0;
+
+// the deadline latency that we are interested in
+uint64_t deadline_us = 2500;
+
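+// returns the scheduling priority of the current thread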
+int thread_pri() {
+ struct sched_param param;
+ int policy;
+  ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
+ return param.sched_priority;
+}
+
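+// in verbose mode, dump the current thread's pid/tid/cpu and its
+// scheduler policy and priority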
+void thread_dump(const char* prefix) {
+ struct sched_param param;
+ int policy;
+ if (!verbose) return;
+ cout << "--------------------------------------------------" << endl;
+ cout << setw(12) << left << prefix << " pid: " << getpid()
+ << " tid: " << gettid() << " cpu: " << sched_getcpu() << endl;
+  ASSERT(!pthread_getschedparam(pthread_self(), &policy, &param));
+ string s = (policy == SCHED_OTHER)
+ ? "SCHED_OTHER"
+ : (policy == SCHED_FIFO)
+ ? "SCHED_FIFO"
+ : (policy == SCHED_RR) ? "SCHED_RR" : "???";
+ cout << setw(12) << left << s << param.sched_priority << endl;
+ return;
+}
+
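+// the server side of a transaction: replies whether the caller's
+// priority and cpu were inherited (see the transaction format below)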
+class BinderWorkerService : public BBinder {
+ public:
+ BinderWorkerService() {
+ }
+ ~BinderWorkerService() {
+ }
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0) {
+ (void)flags;
+ (void)data;
+ (void)reply;
+ switch (code) {
+ // The transaction format is like
+ //
+ // data[in]: int32: caller priority
+ // int32: caller cpu
+ //
+ // reply[out]: int32: 1 if caller's priority != callee's priority
+ // int32: 1 if caller's cpu != callee's cpu
+ //
+      // note: the caller cpu read here is not always correct; there is
+      // still a chance that the caller got switched out right after
+      // reading its cpu number and before the transaction
+ case BINDER_NOP: {
+ thread_dump("binder");
+ int priority = thread_pri();
+ int priority_caller = data.readInt32();
+ int h = 0, s = 0;
+ if (priority_caller != priority) {
+ h++;
+ if (verbose) {
+ cout << "err priority_caller:" << priority_caller
+ << ", priority:" << priority << endl;
+ }
+ }
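+      // only the max-priority FIFO caller verifies the same-cpu (sync)
+      // property; callers at other priorities skip this check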
+ if (priority == sched_get_priority_max(SCHED_FIFO)) {
+ int cpu = sched_getcpu();
+ int cpu_caller = data.readInt32();
+ if (cpu != cpu_caller) {
+ s++;
+ }
+ }
+ reply->writeInt32(h);
+ reply->writeInt32(s);
+ return NO_ERROR;
+ }
+ default:
+ return UNKNOWN_TRANSACTION;
+ };
+ }
+};
+
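+// a pair of pipes used as a bidirectional channel between the master
+// process and each worker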
+class Pipe {
+ int m_readFd;
+ int m_writeFd;
+ Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {
+ }
+ Pipe(const Pipe&) = delete;
+ Pipe& operator=(const Pipe&) = delete;
+  Pipe& operator=(Pipe&&) = delete;
+
+ public:
+ Pipe(Pipe&& rval) noexcept {
+ m_readFd = rval.m_readFd;
+ m_writeFd = rval.m_writeFd;
+ rval.m_readFd = 0;
+ rval.m_writeFd = 0;
+ }
+ ~Pipe() {
+ if (m_readFd) close(m_readFd);
+ if (m_writeFd) close(m_writeFd);
+ }
+ void signal() {
+ bool val = true;
+ int error = write(m_writeFd, &val, sizeof(val));
+ ASSERT(error >= 0);
+ };
+ void wait() {
+ bool val = false;
+ int error = read(m_readFd, &val, sizeof(val));
+ ASSERT(error >= 0);
+ }
+ template <typename T>
+ void send(const T& v) {
+ int error = write(m_writeFd, &v, sizeof(T));
+ ASSERT(error >= 0);
+ }
+ template <typename T>
+ void recv(T& v) {
+ int error = read(m_readFd, &v, sizeof(T));
+ ASSERT(error >= 0);
+ }
+ static tuple<Pipe, Pipe> createPipePair() {
+ int a[2];
+ int b[2];
+
+ int error1 = pipe(a);
+ int error2 = pipe(b);
+ ASSERT(error1 >= 0);
+ ASSERT(error2 >= 0);
+
+ return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
+ }
+};
+
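+// a high-resolution timestamp type and helpers to take one and to diff
+// two of them in nanoseconds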
+typedef chrono::time_point<chrono::high_resolution_clock> Tick;
+
+static inline Tick tickNow() {
+ return chrono::high_resolution_clock::now();
+}
+
+static inline uint64_t tickNano(const Tick& sta, const Tick& end) {
+ return uint64_t(chrono::duration_cast<chrono::nanoseconds>(end - sta).count());
+}
+
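+// running latency statistics in nanoseconds: best, worst, average and
+// the number of deadline misses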
+struct Results {
+ uint64_t m_best = 0xffffffffffffffffULL;
+ uint64_t m_worst = 0;
+ uint64_t m_transactions = 0;
+ uint64_t m_total_time = 0;
+ uint64_t m_miss = 0;
+
+ void add_time(uint64_t nano) {
+ m_best = min(nano, m_best);
+ m_worst = max(nano, m_worst);
+ m_transactions += 1;
+ m_total_time += nano;
+ if (nano > deadline_us * 1000) m_miss++;
+ }
+ void dump() {
+ double best = (double)m_best / 1.0E6;
+ double worst = (double)m_worst / 1.0E6;
+ double average = (double)m_total_time / m_transactions / 1.0E6;
+ // FIXME: libjson?
+    cout << std::setprecision(DUMP_PRECISION) << "{ \"avg\":" << setw(5) << left
+ << average << ", \"wst\":" << setw(5) << left << worst
+ << ", \"bst\":" << setw(5) << left << best << ", \"miss\":" << m_miss
+ << "}";
+ }
+};
+
+String16 generateServiceName(int num) {
+ char num_str[32];
+ snprintf(num_str, sizeof(num_str), "%d", num);
+ String16 serviceName = String16("binderWorker") + String16(num_str);
+ return serviceName;
+}
+
+static void parcel_fill(Parcel& data, int sz, int priority, int cpu) {
+ ASSERT(sz >= (int)sizeof(uint32_t) * 2);
+ data.writeInt32(priority);
+ data.writeInt32(cpu);
+ sz -= sizeof(uint32_t);
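+  // two int32s were written above but sz was only decremented once; the
+  // strict '>' below compensates, so exactly sz bytes are written in
+  // total (for sz a multiple of 4)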
+ while (sz > (int)sizeof(uint32_t)) {
+ data.writeInt32(0);
+ sz -= sizeof(uint32_t);
+ }
+}
+
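+// entry of the max-priority FIFO thread: performs a single BINDER_NOP
+// transaction against the first server (workers[0]) and records its
+// latency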
+static void* thread_start(void* p) {
+ Results* results_fifo = (Results*)p;
+ Parcel data, reply;
+ Tick sta, end;
+
+ parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
+ thread_dump("fifo-caller");
+
+ sta = tickNow();
+  ASSERT(NO_ERROR == workers[0]->transact(BINDER_NOP, data, &reply));
+ end = tickNow();
+ results_fifo->add_time(tickNano(sta, end));
+
+ no_inherent += reply.readInt32();
+ no_sync += reply.readInt32();
+ return 0;
+}
+
+// create a max-priority SCHED_FIFO thread to perform one transaction and
+// wait for it to finish
+static void thread_transaction(Results* results_fifo) {
+ void* dummy;
+ pthread_t thread;
+ pthread_attr_t attr;
+ struct sched_param param;
+  ASSERT(!pthread_attr_init(&attr));
+  // take the scheduling attributes from attr instead of inheriting them
+  // from the creating thread (otherwise setschedpolicy below may be
+  // silently ignored)
+  ASSERT(!pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
+  ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
+  param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+  ASSERT(!pthread_attr_setschedparam(&attr, &param));
+ ASSERT(!pthread_create(&thread, &attr, &thread_start, results_fifo));
+ ASSERT(!pthread_join(thread, &dummy));
+}
+
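+// processes in the upper half of the ids act as clients, the lower half
+// as servers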
+#define is_client(_num) ((_num) >= (no_process / 2))
+
+void worker_fx(int num, int no_process, int iterations, int payload_size,
+ Pipe p) {
+  int dummy = 0;
+ Results results_other, results_fifo;
+
+  // Create a BinderWorkerService and register it with the service manager.
+ ProcessState::self()->startThreadPool();
+ sp<IServiceManager> serviceMgr = defaultServiceManager();
+ sp<BinderWorkerService> service = new BinderWorkerService;
+ serviceMgr->addService(generateServiceName(num), service);
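+  // tell the master we are ready and wait until every worker has
+  // registered its service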
+ p.signal();
+ p.wait();
+
+  // Processes come in client/server pairs: the lower half of the ids
+  // are servers and the upper half are clients.
+ int server_count = no_process / 2;
+
+ for (int i = 0; i < server_count; i++) {
+    // skip the self service: an in-process call would not exercise binder
+ if (num == i) continue;
+ workers.push_back(serviceMgr->getService(generateServiceName(i)));
+ }
+
+  // The client of each pair iterates here;
+  // each iteration contains exactly 2 transactions.
+ for (int i = 0; is_client(num) && i < iterations; i++) {
+ Parcel data, reply;
+ Tick sta, end;
+    // each client transacts with a fixed paired server to make results
+    // easier to diagnose
+ int target = num % server_count;
+
+ // 1. transaction by fifo thread
+ thread_transaction(&results_fifo);
+ parcel_fill(data, payload_size, thread_pri(), sched_getcpu());
+ thread_dump("other-caller");
+
+ // 2. transaction by other thread
+ sta = tickNow();
+ ASSERT(NO_ERROR == workers[target]->transact(BINDER_NOP, data, &reply));
+ end = tickNow();
+ results_other.add_time(tickNano(sta, end));
+
+ no_inherent += reply.readInt32();
+ no_sync += reply.readInt32();
+ }
+ // Signal completion to master and wait.
+ p.signal();
+ p.wait();
+
+  // send a placeholder value (never read by the master) and wait for our
+  // turn to dump, so that per-process output is not interleaved
+  p.send(dummy);
+ p.wait();
+  // The client of each pair dumps its results here.
+ if (is_client(num)) {
+ int no_trans = iterations * 2;
+ double sync_ratio = (1.0 - (double)no_sync / no_trans);
+ // FIXME: libjson?
+ cout << "\"P" << (num - server_count) << "\":{\"SYNC\":\""
+ << ((sync_ratio > GOOD_SYNC_MIN) ? "GOOD" : "POOR") << "\","
+ << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << ","
+ << "\"R\":" << sync_ratio << "," << endl;
+
+ cout << " \"other_ms\":";
+ results_other.dump();
+ cout << "," << endl;
+ cout << " \"fifo_ms\": ";
+ results_fifo.dump();
+ cout << endl;
+ cout << "}," << endl;
+ }
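+  // the exit status carries the no-inheritance count back to the master;
+  // note that only the low 8 bits survive exit()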
+ exit(no_inherent);
+}
+
+Pipe make_process(int num, int iterations, int no_process, int payload_size) {
+ auto pipe_pair = Pipe::createPipePair();
+ pid_t pid = fork();
+ if (pid) {
+ // parent
+ return move(get<0>(pipe_pair));
+ } else {
+ // child
+ thread_dump(is_client(num) ? "client" : "server");
+ worker_fx(num, no_process, iterations, payload_size,
+ move(get<1>(pipe_pair)));
+ // never get here
+ return move(get<0>(pipe_pair));
+ }
+}
+
+void wait_all(vector<Pipe>& v) {
+ for (size_t i = 0; i < v.size(); i++) {
+ v[i].wait();
+ }
+}
+
+void signal_all(vector<Pipe>& v) {
+ for (size_t i = 0; i < v.size(); i++) {
+ v[i].signal();
+ }
+}
+
+// This test is modified from binderThroughputTest.cpp
+int main(int argc, char** argv) {
+ for (int i = 1; i < argc; i++) {
+ if (string(argv[i]) == "-i") {
+ iterations = atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+ if (string(argv[i]) == "-pair") {
+ no_process = 2 * atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+ if (string(argv[i]) == "-deadline_us") {
+ deadline_us = atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+    if (string(argv[i]) == "-v") {
+      verbose = 1;
+    }
+ }
+ vector<Pipe> pipes;
+ thread_dump("main");
+ // FIXME: libjson?
+ cout << "{" << endl;
+ cout << "\"cfg\":{\"pair\":" << (no_process / 2)
+ << ",\"iterations\":" << iterations << ",\"deadline_us\":" << deadline_us
+ << "}," << endl;
+
+  // the main process forks 2 processes for each pair
+  // (1 server + 1 client), each with a pipe to communicate
+  // with the master
+ for (int i = 0; i < no_process; i++) {
+ pipes.push_back(make_process(i, iterations, no_process, payload_size));
+ }
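+  // barrier 1: wait until every worker has registered its service, then
+  // release them all to fetch handles and start the iterations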
+ wait_all(pipes);
+ signal_all(pipes);
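+  // barrier 2: wait until every client has finished its iterations, then
+  // release the workers toward the dump phase; each waits for its
+  // individual turn below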
+ wait_all(pipes);
+ signal_all(pipes);
+ for (int i = 0; i < no_process; i++) {
+ int status;
+ pipes[i].signal();
+    wait(&status);
+    // the exit status is the number of transactions without priority
+    // inheritance detected in the child process
+    no_inherent += WEXITSTATUS(status);
+ }
+ // FIXME: libjson?
+ cout << "\"inheritance\": " << (no_inherent == 0 ? "\"PASS\"" : "\"FAIL\"")
+ << endl;
+ cout << "}" << endl;
+ return -no_inherent;
+}