#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <string>
#include <cstring>
#include <cstdlib>
#include <cstdio>

#include <chrono>
#include <iostream>
#include <vector>
#include <tuple>

#include <unistd.h>
#include <sys/wait.h>

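// Binder throughput/latency benchmark: forks a set of worker processes that
// each register a BinderWorkerService and fire BINDER_NOP transactions at one
// another, recording per-call latency in a histogram and reporting overall
// throughput. -w sets the number of workers (default 2), -i the iterations
// per worker (default 10000).
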
using namespace std;
using namespace android;

enum BinderWorkerServiceCode {
    BINDER_NOP = IBinder::FIRST_CALL_TRANSACTION,
};

#define ASSERT_TRUE(cond) \
do { \
    if (!(cond)) { \
        cerr << __func__ << ":" << __LINE__ << " condition:" << #cond << " failed\n" << endl; \
        exit(EXIT_FAILURE); \
    } \
} while (0)

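// Minimal binder service: replies to BINDER_NOP with no payload and rejects
// every other transaction code. Each worker registers one of these as its
// transaction target.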
class BinderWorkerService : public BBinder
{
public:
    BinderWorkerService() {}
    ~BinderWorkerService() {}
    virtual status_t onTransact(uint32_t code,
                                const Parcel& data, Parcel* reply,
                                uint32_t flags = 0) {
        (void)flags;
        (void)data;
        (void)reply;
        switch (code) {
        case BINDER_NOP:
            return NO_ERROR;
        default:
            return UNKNOWN_TRANSACTION;
        }
    }
};

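// Lightweight synchronization/IPC channel built on a pair of pipe(2) fds.
// createPipePair() returns two cross-connected ends so the parent and a
// forked worker can signal()/wait() on each other and exchange small POD
// values with send()/recv().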
class Pipe {
    int m_readFd;
    int m_writeFd;
    Pipe(int readFd, int writeFd) : m_readFd{readFd}, m_writeFd{writeFd} {}
    Pipe(const Pipe &) = delete;
    Pipe& operator=(const Pipe &) = delete;
    Pipe& operator=(const Pipe &&) = delete;
public:
    Pipe(Pipe&& rval) noexcept {
        m_readFd = rval.m_readFd;
        m_writeFd = rval.m_writeFd;
        rval.m_readFd = 0;
        rval.m_writeFd = 0;
    }
    ~Pipe() {
        if (m_readFd)
            close(m_readFd);
        if (m_writeFd)
            close(m_writeFd);
    }
    void signal() {
        bool val = true;
        int error = write(m_writeFd, &val, sizeof(val));
        ASSERT_TRUE(error >= 0);
    }
    void wait() {
        bool val = false;
        int error = read(m_readFd, &val, sizeof(val));
        ASSERT_TRUE(error >= 0);
    }
    template <typename T> void send(const T& v) {
        int error = write(m_writeFd, &v, sizeof(T));
        ASSERT_TRUE(error >= 0);
    }
    template <typename T> void recv(T& v) {
        int error = read(m_readFd, &v, sizeof(T));
        ASSERT_TRUE(error >= 0);
    }
    static tuple<Pipe, Pipe> createPipePair() {
        int a[2];
        int b[2];

        int error1 = pipe(a);
        int error2 = pipe(b);
        ASSERT_TRUE(error1 >= 0);
        ASSERT_TRUE(error2 >= 0);

        return make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
    }
};

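// Latency accounting: transaction times are binned into num_buckets
// fixed-width buckets spanning 0..max_time_bucket nanoseconds (50 ms),
// so percentiles can be estimated without storing every sample.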
static const uint32_t num_buckets = 128;
static const uint64_t max_time_bucket = 50ull * 1000000;
static const uint64_t time_per_bucket = max_time_bucket / num_buckets;
static constexpr float time_per_bucket_ms = time_per_bucket / 1.0E6;

struct ProcResults {
    uint64_t m_best = max_time_bucket;
    uint64_t m_worst = 0;
    uint32_t m_buckets[num_buckets] = {0};
    uint64_t m_transactions = 0;
    uint64_t m_total_time = 0;

    void add_time(uint64_t time) {
        m_buckets[min(time, max_time_bucket - 1) / time_per_bucket] += 1;
        m_best = min(time, m_best);
        m_worst = max(time, m_worst);
        m_transactions += 1;
        m_total_time += time;
    }
    static ProcResults combine(const ProcResults& a, const ProcResults& b) {
        ProcResults ret;
        for (uint32_t i = 0; i < num_buckets; i++) {
            ret.m_buckets[i] = a.m_buckets[i] + b.m_buckets[i];
        }
        ret.m_worst = max(a.m_worst, b.m_worst);
        ret.m_best = min(a.m_best, b.m_best);
        ret.m_transactions = a.m_transactions + b.m_transactions;
        ret.m_total_time = a.m_total_time + b.m_total_time;
        return ret;
    }
    void dump() {
        double best = (double)m_best / 1.0E6;
        double worst = (double)m_worst / 1.0E6;
        double average = (double)m_total_time / m_transactions / 1.0E6;
        cout << "average:" << average << "ms worst:" << worst << "ms best:" << best << "ms" << endl;

        uint64_t cur_total = 0;
        for (uint32_t i = 0; i < num_buckets; i++) {
            float cur_time = time_per_bucket_ms * i + 0.5f * time_per_bucket_ms;
            if ((cur_total < 0.5f * m_transactions) && (cur_total + m_buckets[i] >= 0.5f * m_transactions)) {
                cout << "50%: " << cur_time << " ";
            }
            if ((cur_total < 0.9f * m_transactions) && (cur_total + m_buckets[i] >= 0.9f * m_transactions)) {
                cout << "90%: " << cur_time << " ";
            }
            if ((cur_total < 0.95f * m_transactions) && (cur_total + m_buckets[i] >= 0.95f * m_transactions)) {
                cout << "95%: " << cur_time << " ";
            }
            if ((cur_total < 0.99f * m_transactions) && (cur_total + m_buckets[i] >= 0.99f * m_transactions)) {
                cout << "99%: " << cur_time << " ";
            }
            cur_total += m_buckets[i];
        }
        cout << endl;
    }
};

String16 generateServiceName(int num)
{
    char num_str[32];
    snprintf(num_str, sizeof(num_str), "%d", num);
    String16 serviceName = String16("binderWorker") + String16(num_str);
    return serviceName;
}

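// Body of each forked worker. The worker registers its service, handshakes
// with the parent over the pipe (ready -> go), runs the transaction loop
// against the other workers, then sends its ProcResults back and waits for
// the final signal before exiting.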
void worker_fx(
    int num,
    int worker_count,
    int iterations,
    Pipe p)
{
    // Create BinderWorkerService and wait for go.
    ProcessState::self()->startThreadPool();
    sp<IServiceManager> serviceMgr = defaultServiceManager();
    sp<BinderWorkerService> service = new BinderWorkerService;
    serviceMgr->addService(generateServiceName(num), service);

    srand(num);
    p.signal();
    p.wait();

    // Get references to other binder services.
    cout << "Created BinderWorker" << num << endl;
    vector<sp<IBinder> > workers;
    for (int i = 0; i < worker_count; i++) {
        if (num == i)
            continue;
        workers.push_back(serviceMgr->getService(generateServiceName(i)));
    }

    // Run the benchmark.
    ProcResults results;
    chrono::time_point<chrono::high_resolution_clock> start, end;
    for (int i = 0; i < iterations; i++) {
        int target = rand() % workers.size();
        Parcel data, reply;
        start = chrono::high_resolution_clock::now();
        status_t ret = workers[target]->transact(BINDER_NOP, data, &reply);
        end = chrono::high_resolution_clock::now();

        uint64_t cur_time = uint64_t(chrono::duration_cast<chrono::nanoseconds>(end - start).count());
        results.add_time(cur_time);

        if (ret != NO_ERROR) {
            cout << "thread " << num << " failed " << ret << " i : " << i << endl;
            exit(EXIT_FAILURE);
        }
    }
    // Signal completion to master and wait.
    p.signal();
    p.wait();

    // Send results to master and wait for go to exit.
    p.send(results);
    p.wait();

    exit(EXIT_SUCCESS);
}

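// Fork one worker process. The child never returns (worker_fx exits);
// the parent keeps its end of the pipe pair for controlling the worker.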
Pipe make_worker(int num, int iterations, int worker_count)
{
    auto pipe_pair = Pipe::createPipePair();
    pid_t pid = fork();
    if (pid) {
        /* parent */
        return move(get<0>(pipe_pair));
    } else {
        /* child */
        worker_fx(num, worker_count, iterations, move(get<1>(pipe_pair)));
        /* never get here */
        return move(get<0>(pipe_pair));
    }
}

void wait_all(vector<Pipe>& v)
{
    for (size_t i = 0; i < v.size(); i++) {
        v[i].wait();
    }
}

void signal_all(vector<Pipe>& v)
{
    for (size_t i = 0; i < v.size(); i++) {
        v[i].signal();
    }
}

int main(int argc, char *argv[])
{
    int workers = 2;
    int iterations = 10000;
    vector<Pipe> pipes;

    // Parse arguments.
    for (int i = 1; i < argc; i++) {
        if (string(argv[i]) == "-w" && i + 1 < argc) {
            workers = atoi(argv[i+1]);
            i++;
            continue;
        }
        if (string(argv[i]) == "-i" && i + 1 < argc) {
            iterations = atoi(argv[i+1]);
            i++;
            continue;
        }
    }

    // Create all the workers and wait for them to spawn.
    for (int i = 0; i < workers; i++) {
        pipes.push_back(make_worker(i, iterations, workers));
    }
    wait_all(pipes);

    // Run the workers and wait for completion.
    chrono::time_point<chrono::high_resolution_clock> start, end;
    cout << "waiting for workers to complete" << endl;
    start = chrono::high_resolution_clock::now();
    signal_all(pipes);
    wait_all(pipes);
    end = chrono::high_resolution_clock::now();

    // Calculate overall throughput.
    double iterations_per_sec = double(iterations * workers) / (chrono::duration_cast<chrono::nanoseconds>(end - start).count() / 1.0E9);
    cout << "iterations per sec: " << iterations_per_sec << endl;

    // Collect all results from the workers.
    cout << "collecting results" << endl;
    signal_all(pipes);
    ProcResults tot_results;
    for (int i = 0; i < workers; i++) {
        ProcResults tmp_results;
        pipes[i].recv(tmp_results);
        tot_results = ProcResults::combine(tot_results, tmp_results);
    }
    tot_results.dump();

    // Kill all the workers.
    cout << "killing workers" << endl;
    signal_all(pipes);
    for (int i = 0; i < workers; i++) {
        int status;
        wait(&status);
        if (status != 0) {
            cout << "nonzero child status " << status << endl;
        }
    }
    return 0;
}