/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define TRACE_TAG USB

#include "sysdeps.h"

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/usb/functionfs.h>
#include <sys/eventfd.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <deque>
#include <future>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <thread>
#include <vector>

#include <asyncio/AsyncIO.h>

#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/properties.h>
#include <android-base/thread_annotations.h>

#include <adbd/usb.h>

#include "adb_unique_fd.h"
#include "adb_utils.h"
#include "sysdeps/chrono.h"
#include "transport.h"
#include "types.h"

using android::base::StringPrintf;

// We can't find out whether we have support for AIO on ffs endpoints until we submit a read.
static std::optional<bool> gFfsAioSupported;

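// Up to kUsbReadQueueDepth reads of kUsbReadSize bytes each (16 * 16 KiB = 256 KiB in total) are
// kept in flight on the bulk-out endpoint; writes are likewise capped at kUsbWriteQueueDepth
// outstanding submissions.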
static constexpr size_t kUsbReadQueueDepth = 16;
static constexpr size_t kUsbReadSize = 16384;

static constexpr size_t kUsbWriteQueueDepth = 16;

static const char* to_string(enum usb_functionfs_event_type type) {
    switch (type) {
        case FUNCTIONFS_BIND:
            return "FUNCTIONFS_BIND";
        case FUNCTIONFS_UNBIND:
            return "FUNCTIONFS_UNBIND";
        case FUNCTIONFS_ENABLE:
            return "FUNCTIONFS_ENABLE";
        case FUNCTIONFS_DISABLE:
            return "FUNCTIONFS_DISABLE";
        case FUNCTIONFS_SETUP:
            return "FUNCTIONFS_SETUP";
        case FUNCTIONFS_SUSPEND:
            return "FUNCTIONFS_SUSPEND";
        case FUNCTIONFS_RESUME:
            return "FUNCTIONFS_RESUME";
    }
}

enum class TransferDirection : uint64_t {
    READ = 0,
    WRITE = 1,
};

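// A TransferId packs the transfer direction and a monotonically increasing sequence number into
// the 64-bit iocb::aio_data field, so a completed io_event can be matched back to the request
// that produced it via io_event::data.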
struct TransferId {
    TransferDirection direction : 1;
    uint64_t id : 63;

    TransferId() : TransferId(TransferDirection::READ, 0) {}

  private:
    TransferId(TransferDirection direction, uint64_t id) : direction(direction), id(id) {}

  public:
    explicit operator uint64_t() const {
        uint64_t result;
        static_assert(sizeof(*this) == sizeof(result));
        memcpy(&result, this, sizeof(*this));
        return result;
    }

    static TransferId read(uint64_t id) { return TransferId(TransferDirection::READ, id); }
    static TransferId write(uint64_t id) { return TransferId(TransferDirection::WRITE, id); }

    static TransferId from_value(uint64_t value) {
        TransferId result;
        memcpy(&result, &value, sizeof(value));
        return result;
    }
};

struct IoBlock {
    bool pending;
    struct iocb control;
    Block payload;

    TransferId id() const { return TransferId::from_value(control.aio_data); }
};

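// RAII owner for a kernel AIO context: io_setup() on creation, io_destroy() on reset/destruction.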
struct ScopedAioContext {
    ScopedAioContext() = default;
    ~ScopedAioContext() { reset(); }

    ScopedAioContext(ScopedAioContext&& move) { reset(move.release()); }
    ScopedAioContext(const ScopedAioContext& copy) = delete;

    ScopedAioContext& operator=(ScopedAioContext&& move) {
        reset(move.release());
        return *this;
    }
    ScopedAioContext& operator=(const ScopedAioContext& copy) = delete;

    static ScopedAioContext Create(size_t max_events) {
        aio_context_t ctx = 0;
        if (io_setup(max_events, &ctx) != 0) {
            PLOG(FATAL) << "failed to create aio_context_t";
        }
        ScopedAioContext result;
        result.reset(ctx);
        return result;
    }

    aio_context_t release() {
        aio_context_t result = context_;
        context_ = 0;
        return result;
    }

    void reset(aio_context_t new_context = 0) {
        if (context_ != 0) {
            io_destroy(context_);
        }

        context_ = new_context;
    }

    aio_context_t get() { return context_; }

  private:
    aio_context_t context_ = 0;
};
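// Usage sketch (illustrative only; `some_block` is hypothetical):
//     ScopedAioContext ctx = ScopedAioContext::Create(8);
//     struct iocb* iocbs[] = {&some_block.control};
//     io_submit(ctx.get(), 1, iocbs);  // io_destroy() runs automatically when ctx goes out of scope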

struct UsbFfsConnection : public Connection {
    UsbFfsConnection(unique_fd control, unique_fd read, unique_fd write,
                     std::promise<void> destruction_notifier)
        : stopped_(false),
          destruction_notifier_(std::move(destruction_notifier)),
          control_fd_(std::move(control)),
          read_fd_(std::move(read)),
          write_fd_(std::move(write)) {
        LOG(INFO) << "UsbFfsConnection constructed";
        worker_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (worker_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        monitor_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (monitor_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        aio_context_ = ScopedAioContext::Create(kUsbReadQueueDepth + kUsbWriteQueueDepth);
    }

    ~UsbFfsConnection() {
        LOG(INFO) << "UsbFfsConnection being destroyed";
        Stop();
        monitor_thread_.join();

        // We need to explicitly close our file descriptors before we notify our destruction,
        // because the thread listening on the future will immediately try to reopen the endpoint.
        control_fd_.reset();
        read_fd_.reset();
        write_fd_.reset();

        destruction_notifier_.set_value();
    }

    virtual bool Write(std::unique_ptr<apacket> packet) override final {
        LOG(DEBUG) << "USB write: " << dump_header(&packet->msg);
        Block header(sizeof(packet->msg));
        memcpy(header.data(), &packet->msg, sizeof(packet->msg));

        std::lock_guard<std::mutex> lock(write_mutex_);
        write_requests_.push_back(CreateWriteBlock(std::move(header), next_write_id_++));
        if (!packet->payload.empty()) {
            write_requests_.push_back(
                    CreateWriteBlock(std::move(packet->payload), next_write_id_++));
        }
        SubmitWrites();
        return true;
    }

    virtual void Start() override final { StartMonitor(); }

    virtual void Stop() override final {
        if (stopped_.exchange(true)) {
            return;
        }
        stopped_ = true;
        uint64_t notify = 1;
        ssize_t rc = adb_write(worker_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify worker eventfd to stop UsbFfsConnection";
        }
        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));

        rc = adb_write(monitor_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify monitor eventfd to stop UsbFfsConnection";
        }

        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));
    }

  private:
    void StartMonitor() {
        // This is a bit of a mess.
        // It's possible for io_submit to end up blocking if we call it as the endpoint becomes
        // disabled. Work around this by having a monitor thread listen for functionfs lifecycle
        // events. If we notice an error condition (either we've become disabled, or we were never
        // enabled in the first place), we send interruption signals to the worker thread until it
        // dies, and then report failure to the transport via HandleError, which eventually
        // destroys the transport, which destroys this UsbFfsConnection, which unblocks the open
        // thread and restarts this entire process.
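        // The no-op SIGUSR1 handler exists purely so that pthread_kill() makes blocking syscalls
        // on the worker thread fail with EINTR instead of terminating the process under the
        // default signal disposition.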
        static constexpr int kInterruptionSignal = SIGUSR1;
        static std::once_flag handler_once;
        std::call_once(handler_once, []() { signal(kInterruptionSignal, [](int) {}); });

        monitor_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-monitor");

            bool bound = false;
            bool started = false;
            bool running = true;
            while (running) {
                int timeout = -1;
                if (!bound || !started) {
                    timeout = 5000 /*ms*/;
                }

                adb_pollfd pfd[2] = {
                        { .fd = control_fd_.get(), .events = POLLIN, .revents = 0 },
                        { .fd = monitor_event_fd_.get(), .events = POLLIN, .revents = 0 },
                };
                int rc = TEMP_FAILURE_RETRY(adb_poll(pfd, 2, timeout));
                if (rc == -1) {
                    PLOG(FATAL) << "poll on USB control fd failed";
                } else if (rc == 0) {
                    // Something in the kernel presumably went wrong.
                    // Close our endpoints, wait for a bit, and then try again.
                    aio_context_.reset();
                    read_fd_.reset();
                    write_fd_.reset();
                    control_fd_.reset();
                    std::this_thread::sleep_for(5s);
                    HandleError("didn't receive FUNCTIONFS_ENABLE, retrying");
                    return;
                }

                if (pfd[1].revents) {
                    // We were told to die.
                    break;
                }

                struct usb_functionfs_event event;
                if (TEMP_FAILURE_RETRY(adb_read(control_fd_.get(), &event, sizeof(event))) !=
                    sizeof(event)) {
                    PLOG(FATAL) << "failed to read functionfs event";
                }

                LOG(INFO) << "USB event: "
                          << to_string(static_cast<usb_functionfs_event_type>(event.type));

                switch (event.type) {
                    case FUNCTIONFS_BIND:
                        CHECK(!started) << "received FUNCTIONFS_BIND while already running?";
                        bound = true;
                        break;

                    case FUNCTIONFS_ENABLE:
                        CHECK(!started) << "received FUNCTIONFS_ENABLE while already running?";
                        started = true;
                        StartWorker();
                        break;

                    case FUNCTIONFS_DISABLE:
                        running = false;
                        break;
                }
            }

            pthread_t worker_thread_handle = worker_thread_.native_handle();
            while (true) {
                int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
                if (rc != 0) {
                    LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
                    break;
                }

                std::this_thread::sleep_for(100ms);

                rc = pthread_kill(worker_thread_handle, 0);
                if (rc == 0) {
                    continue;
                } else if (rc == ESRCH) {
                    break;
                } else {
                    LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
                }
            }

            worker_thread_.join();

            aio_context_.reset();
            read_fd_.reset();
            write_fd_.reset();
        });
    }

    void StartWorker() {
        worker_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-worker");
            for (size_t i = 0; i < kUsbReadQueueDepth; ++i) {
                read_requests_[i] = CreateReadBlock(next_read_id_++);
                if (!SubmitRead(&read_requests_[i])) {
                    return;
                }
            }

            while (!stopped_) {
                uint64_t dummy;
                ssize_t rc = adb_read(worker_event_fd_.get(), &dummy, sizeof(dummy));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read from eventfd";
                } else if (rc == 0) {
                    LOG(FATAL) << "hit EOF on eventfd";
                }

                WaitForEvents();
            }
        });
    }

    void PrepareReadBlock(IoBlock* block, uint64_t id) {
        block->pending = false;
        block->payload.resize(kUsbReadSize);
        block->control.aio_data = static_cast<uint64_t>(TransferId::read(id));
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload.data());
        block->control.aio_nbytes = block->payload.size();
    }

    IoBlock CreateReadBlock(uint64_t id) {
        IoBlock block;
        PrepareReadBlock(&block, id);
        block.control.aio_rw_flags = 0;
        block.control.aio_lio_opcode = IOCB_CMD_PREAD;
        block.control.aio_reqprio = 0;
        block.control.aio_fildes = read_fd_.get();
        block.control.aio_offset = 0;
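        // IOCB_FLAG_RESFD asks the kernel to signal completion of this request on
        // worker_event_fd_, which is what the worker thread blocks on before reaping completions
        // with io_getevents().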
        block.control.aio_flags = IOCB_FLAG_RESFD;
        block.control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    void WaitForEvents() {
        static constexpr size_t kMaxEvents = kUsbReadQueueDepth + kUsbWriteQueueDepth;
        struct io_event events[kMaxEvents];
        struct timespec timeout = {.tv_sec = 0, .tv_nsec = 0};
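        // A zero timeout makes io_getevents() non-blocking: we only reap whatever has already
        // completed, since the eventfd read in the worker loop told us something is ready.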
        int rc = io_getevents(aio_context_.get(), 0, kMaxEvents, events, &timeout);
        if (rc == -1) {
            HandleError(StringPrintf("io_getevents failed while reading: %s", strerror(errno)));
            return;
        }

        for (int event_idx = 0; event_idx < rc; ++event_idx) {
            auto& event = events[event_idx];
            TransferId id = TransferId::from_value(event.data);

            if (event.res < 0) {
                std::string error =
                        StringPrintf("%s %" PRIu64 " failed with error %s",
                                     id.direction == TransferDirection::READ ? "read" : "write",
                                     id.id, strerror(-event.res));
                HandleError(error);
                return;
            }

            if (id.direction == TransferDirection::READ) {
                HandleRead(id, event.res);
            } else {
                HandleWrite(id);
            }
        }
    }

    void HandleRead(TransferId id, int64_t size) {
        uint64_t read_idx = id.id % kUsbReadQueueDepth;
        IoBlock* block = &read_requests_[read_idx];
        block->pending = false;
        block->payload.resize(size);

        // Notification for completed reads can be received out of order.
        if (block->id().id != needed_read_id_) {
            LOG(VERBOSE) << "read " << block->id().id << " completed while waiting for "
                         << needed_read_id_;
            return;
        }

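        // This read completed in order. Consume it, then drain any subsequent reads that have
        // already completed, advancing needed_read_id_ as each one is processed.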
        for (uint64_t id = needed_read_id_;; ++id) {
            size_t read_idx = id % kUsbReadQueueDepth;
            IoBlock* current_block = &read_requests_[read_idx];
            if (current_block->pending) {
                break;
            }
            ProcessRead(current_block);
            ++needed_read_id_;
        }
    }

    void ProcessRead(IoBlock* block) {
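        // adb packets arrive as a header-sized read followed by zero or more payload reads;
        // incoming_header_ tracks which phase we're in until data_length payload bytes have
        // accumulated, at which point a complete apacket is handed to read_callback_.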
        if (!block->payload.empty()) {
            if (!incoming_header_.has_value()) {
                CHECK_EQ(sizeof(amessage), block->payload.size());
                amessage msg;
                memcpy(&msg, block->payload.data(), sizeof(amessage));
                LOG(DEBUG) << "USB read:" << dump_header(&msg);
                incoming_header_ = msg;
            } else {
                size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
                Block payload = std::move(block->payload);
                CHECK_LE(payload.size(), bytes_left);
                incoming_payload_.append(std::make_unique<Block>(std::move(payload)));
            }

            if (incoming_header_->data_length == incoming_payload_.size()) {
                auto packet = std::make_unique<apacket>();
                packet->msg = *incoming_header_;

                // TODO: Make apacket contain an IOVector so we don't have to coalesce.
                packet->payload = incoming_payload_.coalesce();
                read_callback_(this, std::move(packet));

                incoming_header_.reset();
                incoming_payload_.clear();
            }
        }

        PrepareReadBlock(block, block->id().id + kUsbReadQueueDepth);
        SubmitRead(block);
    }

    bool SubmitRead(IoBlock* block) {
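        // The very first submission doubles as a feature probe: if the kernel rejects it with
        // EINVAL before gFfsAioSupported has been set, AIO on functionfs isn't available and
        // usb_ffs_open_thread() will fall back to the legacy implementation.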
        block->pending = true;
        struct iocb* iocb = &block->control;
        if (io_submit(aio_context_.get(), 1, &iocb) != 1) {
            if (errno == EINVAL && !gFfsAioSupported.has_value()) {
                HandleError("failed to submit first read, AIO on FFS not supported");
                gFfsAioSupported = false;
                return false;
            }

            HandleError(StringPrintf("failed to submit read: %s", strerror(errno)));
            return false;
        }

        gFfsAioSupported = true;
        return true;
    }

    void HandleWrite(TransferId id) {
        std::lock_guard<std::mutex> lock(write_mutex_);
        auto it =
                std::find_if(write_requests_.begin(), write_requests_.end(), [id](const auto& req) {
                    return static_cast<uint64_t>(req->id()) == static_cast<uint64_t>(id);
                });
        CHECK(it != write_requests_.end());

        write_requests_.erase(it);
        size_t outstanding_writes = --writes_submitted_;
        LOG(DEBUG) << "USB write: reaped, down to " << outstanding_writes;

        SubmitWrites();
    }

    std::unique_ptr<IoBlock> CreateWriteBlock(Block payload, uint64_t id) {
        auto block = std::make_unique<IoBlock>();
        block->payload = std::move(payload);
        block->control.aio_data = static_cast<uint64_t>(TransferId::write(id));
        block->control.aio_rw_flags = 0;
        block->control.aio_lio_opcode = IOCB_CMD_PWRITE;
        block->control.aio_reqprio = 0;
        block->control.aio_fildes = write_fd_.get();
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload.data());
        block->control.aio_nbytes = block->payload.size();
        block->control.aio_offset = 0;
        block->control.aio_flags = IOCB_FLAG_RESFD;
        block->control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    void SubmitWrites() REQUIRES(write_mutex_) {
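        // Submit queued write iocbs in order, keeping at most kUsbWriteQueueDepth of them in
        // flight; HandleWrite() reaps completions and calls back in here to refill the queue.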
        if (writes_submitted_ == kUsbWriteQueueDepth) {
            return;
        }

        ssize_t writes_to_submit = std::min(kUsbWriteQueueDepth - writes_submitted_,
                                            write_requests_.size() - writes_submitted_);
        CHECK_GE(writes_to_submit, 0);
        if (writes_to_submit == 0) {
            return;
        }

        struct iocb* iocbs[kUsbWriteQueueDepth];
        for (int i = 0; i < writes_to_submit; ++i) {
            CHECK(!write_requests_[writes_submitted_ + i]->pending);
            write_requests_[writes_submitted_ + i]->pending = true;
            iocbs[i] = &write_requests_[writes_submitted_ + i]->control;
            LOG(VERBOSE) << "submitting write_request " << static_cast<void*>(iocbs[i]);
        }

        int rc = io_submit(aio_context_.get(), writes_to_submit, iocbs);
        if (rc == -1) {
            HandleError(StringPrintf("failed to submit write requests: %s", strerror(errno)));
            return;
        } else if (rc != writes_to_submit) {
            LOG(FATAL) << "failed to submit all writes: wanted to submit " << writes_to_submit
                       << ", actually submitted " << rc;
        }

        writes_submitted_ += rc;
    }

    void HandleError(const std::string& error) {
        std::call_once(error_flag_, [&]() {
            error_callback_(this, error);
            if (!stopped_) {
                Stop();
            }
        });
    }

    std::thread monitor_thread_;
    std::thread worker_thread_;

    std::atomic<bool> stopped_;
    std::promise<void> destruction_notifier_;
    std::once_flag error_flag_;

    unique_fd worker_event_fd_;
    unique_fd monitor_event_fd_;

    ScopedAioContext aio_context_;
    unique_fd control_fd_;
    unique_fd read_fd_;
    unique_fd write_fd_;

    std::optional<amessage> incoming_header_;
    IOVector incoming_payload_;

    std::array<IoBlock, kUsbReadQueueDepth> read_requests_;
    IOVector read_data_;

    // ID of the next request that we're going to send out.
    size_t next_read_id_ = 0;

    // ID of the next packet we're waiting for.
    size_t needed_read_id_ = 0;

    std::mutex write_mutex_;
    std::deque<std::unique_ptr<IoBlock>> write_requests_ GUARDED_BY(write_mutex_);
    size_t next_write_id_ GUARDED_BY(write_mutex_) = 0;
    size_t writes_submitted_ GUARDED_BY(write_mutex_) = 0;
};

void usb_init_legacy();

static void usb_ffs_open_thread() {
    adb_thread_setname("usb ffs open");

    while (true) {
        if (gFfsAioSupported.has_value() && !gFfsAioSupported.value()) {
            LOG(INFO) << "failed to use nonblocking ffs, falling back to legacy";
            return usb_init_legacy();
        }

        unique_fd control;
        unique_fd bulk_out;
        unique_fd bulk_in;
        if (!open_functionfs(&control, &bulk_out, &bulk_in)) {
            std::this_thread::sleep_for(1s);
            continue;
        }

        atransport* transport = new atransport();
        transport->serial = "UsbFfs";
        std::promise<void> destruction_notifier;
        std::future<void> future = destruction_notifier.get_future();
        transport->SetConnection(std::make_unique<UsbFfsConnection>(
                std::move(control), std::move(bulk_out), std::move(bulk_in),
                std::move(destruction_notifier)));
        register_transport(transport);
        future.wait();
    }
}

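// persist.adb.nonblocking_ffs (default true) selects between this AIO-based implementation and
// the legacy blocking implementation behind usb_init_legacy().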
void usb_init() {
    bool use_nonblocking = android::base::GetBoolProperty("persist.adb.nonblocking_ffs", true);
    if (use_nonblocking) {
        std::thread(usb_ffs_open_thread).detach();
    } else {
        usb_init_legacy();
    }
}