/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define TRACE_TAG USB

#include "sysdeps.h"

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/usb/functionfs.h>
#include <sys/eventfd.h>

#include <algorithm>
#include <array>
#include <atomic>
#include <deque>
#include <future>
#include <memory>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>

#include <asyncio/AsyncIO.h>

#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/properties.h>
#include <android-base/thread_annotations.h>

#include <adbd/usb.h>

#include "adb_unique_fd.h"
#include "adb_utils.h"
#include "sysdeps/chrono.h"
#include "transport.h"
#include "types.h"

using android::base::StringPrintf;

// We can't find out whether we have support for AIO on ffs endpoints until we submit a read.
static std::optional<bool> gFfsAioSupported;

// Not all USB controllers support operations larger than 16k, so don't go above that.
// Also, each submitted operation does an allocation in the kernel of that size, so we want to
// minimize our queue depth while still maintaining a deep enough queue to keep the USB stack fed.
static constexpr size_t kUsbReadQueueDepth = 8;
static constexpr size_t kUsbReadSize = 4 * PAGE_SIZE;

static constexpr size_t kUsbWriteQueueDepth = 8;
static constexpr size_t kUsbWriteSize = 4 * PAGE_SIZE;

static const char* to_string(enum usb_functionfs_event_type type) {
    switch (type) {
        case FUNCTIONFS_BIND:
            return "FUNCTIONFS_BIND";
        case FUNCTIONFS_UNBIND:
            return "FUNCTIONFS_UNBIND";
        case FUNCTIONFS_ENABLE:
            return "FUNCTIONFS_ENABLE";
        case FUNCTIONFS_DISABLE:
            return "FUNCTIONFS_DISABLE";
        case FUNCTIONFS_SETUP:
            return "FUNCTIONFS_SETUP";
        case FUNCTIONFS_SUSPEND:
            return "FUNCTIONFS_SUSPEND";
        case FUNCTIONFS_RESUME:
            return "FUNCTIONFS_RESUME";
    }
}

enum class TransferDirection : uint64_t {
    READ = 0,
    WRITE = 1,
};

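// TransferId packs the transfer direction and a sequence number into the 64-bit aio_data field
// of an iocb, so a completed io_event can be matched back to the request that produced it.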
struct TransferId {
    TransferDirection direction : 1;
    uint64_t id : 63;

    TransferId() : TransferId(TransferDirection::READ, 0) {}

  private:
    TransferId(TransferDirection direction, uint64_t id) : direction(direction), id(id) {}

  public:
    explicit operator uint64_t() const {
        uint64_t result;
        static_assert(sizeof(*this) == sizeof(result));
        memcpy(&result, this, sizeof(*this));
        return result;
    }

    static TransferId read(uint64_t id) { return TransferId(TransferDirection::READ, id); }
    static TransferId write(uint64_t id) { return TransferId(TransferDirection::WRITE, id); }

    static TransferId from_value(uint64_t value) {
        TransferId result;
        memcpy(&result, &value, sizeof(value));
        return result;
    }
};

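// An IoBlock pairs an AIO control block with the buffer it reads into or writes from, keeping
// the payload alive until the kernel reports the operation as complete.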
template <class Payload>
struct IoBlock {
    bool pending = false;
    struct iocb control = {};
    Payload payload;

    TransferId id() const { return TransferId::from_value(control.aio_data); }
};

using IoReadBlock = IoBlock<Block>;
using IoWriteBlock = IoBlock<std::shared_ptr<Block>>;

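// RAII wrapper around a kernel aio_context_t: io_setup on creation, io_destroy on reset or
// destruction.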
struct ScopedAioContext {
    ScopedAioContext() = default;
    ~ScopedAioContext() { reset(); }

    ScopedAioContext(ScopedAioContext&& move) { reset(move.release()); }
    ScopedAioContext(const ScopedAioContext& copy) = delete;

    ScopedAioContext& operator=(ScopedAioContext&& move) {
        reset(move.release());
        return *this;
    }
    ScopedAioContext& operator=(const ScopedAioContext& copy) = delete;

    static ScopedAioContext Create(size_t max_events) {
        aio_context_t ctx = 0;
        if (io_setup(max_events, &ctx) != 0) {
            PLOG(FATAL) << "failed to create aio_context_t";
        }
        ScopedAioContext result;
        result.reset(ctx);
        return result;
    }

    aio_context_t release() {
        aio_context_t result = context_;
        context_ = 0;
        return result;
    }

    void reset(aio_context_t new_context = 0) {
        if (context_ != 0) {
            io_destroy(context_);
        }

        context_ = new_context;
    }

    aio_context_t get() { return context_; }

  private:
    aio_context_t context_ = 0;
};

struct UsbFfsConnection : public Connection {
    UsbFfsConnection(unique_fd control, unique_fd read, unique_fd write,
                     std::promise<void> destruction_notifier)
        : worker_started_(false),
          stopped_(false),
          destruction_notifier_(std::move(destruction_notifier)),
          control_fd_(std::move(control)),
          read_fd_(std::move(read)),
          write_fd_(std::move(write)) {
        LOG(INFO) << "UsbFfsConnection constructed";
        worker_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (worker_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        monitor_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (monitor_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        aio_context_ = ScopedAioContext::Create(kUsbReadQueueDepth + kUsbWriteQueueDepth);
    }

    ~UsbFfsConnection() {
        LOG(INFO) << "UsbFfsConnection being destroyed";
        Stop();
        monitor_thread_.join();

        // We need to explicitly close our file descriptors before we notify our destruction,
        // because the thread listening on the future will immediately try to reopen the endpoint.
        aio_context_.reset();
        control_fd_.reset();
        read_fd_.reset();
        write_fd_.reset();

        destruction_notifier_.set_value();
    }

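    // Queue a packet for writing: the header goes out as its own transfer, and the payload is
    // split into kUsbWriteSize chunks so the kernel never needs one large contiguous buffer.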
    virtual bool Write(std::unique_ptr<apacket> packet) override final {
        LOG(DEBUG) << "USB write: " << dump_header(&packet->msg);
        auto header = std::make_shared<Block>(sizeof(packet->msg));
        memcpy(header->data(), &packet->msg, sizeof(packet->msg));

        std::lock_guard<std::mutex> lock(write_mutex_);
        write_requests_.push_back(
                CreateWriteBlock(std::move(header), 0, sizeof(packet->msg), next_write_id_++));
        if (!packet->payload.empty()) {
            // The kernel attempts to allocate a contiguous block of memory for each write,
            // which can fail if the write is large and the kernel heap is fragmented.
            // Split large writes into smaller chunks to avoid this.
            auto payload = std::make_shared<Block>(std::move(packet->payload));
            size_t offset = 0;
            size_t len = payload->size();

            while (len > 0) {
                size_t write_size = std::min(kUsbWriteSize, len);
                write_requests_.push_back(
                        CreateWriteBlock(payload, offset, write_size, next_write_id_++));
                len -= write_size;
                offset += write_size;
            }
        }
        SubmitWrites();
        return true;
    }

    virtual void Start() override final { StartMonitor(); }

    virtual void Stop() override final {
        if (stopped_.exchange(true)) {
            return;
        }
        stopped_ = true;
        uint64_t notify = 1;
        ssize_t rc = adb_write(worker_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify worker eventfd to stop UsbFfsConnection";
        }
        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));

        rc = adb_write(monitor_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify monitor eventfd to stop UsbFfsConnection";
        }

        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));
    }

  private:
    void StartMonitor() {
        // This is a bit of a mess.
        // It's possible for io_submit to end up blocking, if we call it as the endpoint
        // becomes disabled. Work around this by having a monitor thread to listen for functionfs
        // lifecycle events. If we notice an error condition (either we've become disabled, or we
        // were never enabled in the first place), we send interruption signals to the worker
        // thread until it dies, and then report failure to the transport via HandleError, which
        // will eventually result in the transport being destroyed, which will result in
        // UsbFfsConnection being destroyed, which unblocks the open thread and restarts this
        // entire process.
        static std::once_flag handler_once;
        std::call_once(handler_once, []() { signal(kInterruptionSignal, [](int) {}); });

        monitor_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-monitor");

            bool bound = false;
            bool enabled = false;
            bool running = true;
            while (running) {
                adb_pollfd pfd[2] = {
                        { .fd = control_fd_.get(), .events = POLLIN, .revents = 0 },
                        { .fd = monitor_event_fd_.get(), .events = POLLIN, .revents = 0 },
                };

                // If we don't see our first bind within a second, try again.
                int timeout_ms = bound ? -1 : 1000;

                int rc = TEMP_FAILURE_RETRY(adb_poll(pfd, 2, timeout_ms));
                if (rc == -1) {
                    PLOG(FATAL) << "poll on USB control fd failed";
                } else if (rc == 0) {
                    LOG(WARNING) << "timed out while waiting for FUNCTIONFS_BIND, trying again";
                    break;
                }

                if (pfd[1].revents) {
                    // We were told to die.
                    break;
                }

                struct usb_functionfs_event event;
                rc = TEMP_FAILURE_RETRY(adb_read(control_fd_.get(), &event, sizeof(event)));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read functionfs event";
                } else if (rc == 0) {
                    LOG(WARNING) << "hit EOF on functionfs control fd";
                    break;
                } else if (rc != sizeof(event)) {
                    LOG(FATAL) << "read functionfs event of unexpected size, expected "
                               << sizeof(event) << ", got " << rc;
                }

                LOG(INFO) << "USB event: "
                          << to_string(static_cast<usb_functionfs_event_type>(event.type));

                switch (event.type) {
                    case FUNCTIONFS_BIND:
                        if (bound) {
                            LOG(WARNING) << "received FUNCTIONFS_BIND while already bound?";
                            running = false;
                            break;
                        }

                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_BIND while already enabled?";
                            running = false;
                            break;
                        }

                        bound = true;
                        break;

                    case FUNCTIONFS_ENABLE:
                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_ENABLE while not bound?";
                            running = false;
                            break;
                        }

                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_ENABLE while already enabled?";
                            running = false;
                            break;
                        }

                        enabled = true;
                        StartWorker();
                        break;

                    case FUNCTIONFS_DISABLE:
                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_DISABLE while not bound?";
                        }

                        if (!enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_DISABLE while not enabled?";
                        }

                        enabled = false;
                        running = false;
                        break;

                    case FUNCTIONFS_UNBIND:
                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_UNBIND while still enabled?";
                        }

                        if (!bound) {
                            LOG(WARNING) << "received FUNCTIONFS_UNBIND when not bound?";
                        }

                        bound = false;
                        running = false;
                        break;

                    case FUNCTIONFS_SETUP: {
                        LOG(INFO) << "received FUNCTIONFS_SETUP control transfer: bRequestType = "
                                  << static_cast<int>(event.u.setup.bRequestType)
                                  << ", bRequest = " << static_cast<int>(event.u.setup.bRequest)
                                  << ", wValue = " << static_cast<int>(event.u.setup.wValue)
                                  << ", wIndex = " << static_cast<int>(event.u.setup.wIndex)
                                  << ", wLength = " << static_cast<int>(event.u.setup.wLength);

                        if ((event.u.setup.bRequestType & USB_DIR_IN)) {
                            LOG(INFO) << "acking device-to-host control transfer";
                            ssize_t rc = adb_write(control_fd_.get(), "", 0);
                            if (rc != 0) {
                                PLOG(ERROR) << "failed to write empty packet to host";
                                break;
                            }
                        } else {
                            std::string buf;
                            buf.resize(event.u.setup.wLength + 1);

                            ssize_t rc = adb_read(control_fd_.get(), buf.data(), buf.size());
                            if (rc != event.u.setup.wLength) {
                                LOG(ERROR)
                                        << "read " << rc
                                        << " bytes when trying to read control request, expected "
                                        << event.u.setup.wLength;
                            }

                            LOG(INFO) << "control request contents: " << buf;
                            break;
                        }
                    }
                }
            }

            StopWorker();
            HandleError("monitor thread finished");
        });
    }

    void StartWorker() {
        CHECK(!worker_started_);
        worker_started_ = true;
        worker_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-worker");
            for (size_t i = 0; i < kUsbReadQueueDepth; ++i) {
                read_requests_[i] = CreateReadBlock(next_read_id_++);
                if (!SubmitRead(&read_requests_[i])) {
                    return;
                }
            }

            while (!stopped_) {
                uint64_t dummy;
                ssize_t rc = adb_read(worker_event_fd_.get(), &dummy, sizeof(dummy));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read from eventfd";
                } else if (rc == 0) {
                    LOG(FATAL) << "hit EOF on eventfd";
                }

                ReadEvents();
            }
        });
    }

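    // Interrupt the worker thread with kInterruptionSignal until its blocking syscall returns
    // and the thread exits, then join it.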
    void StopWorker() {
        if (!worker_started_) {
            return;
        }

        pthread_t worker_thread_handle = worker_thread_.native_handle();
        while (true) {
            int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
            if (rc != 0) {
                LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
                break;
            }

            std::this_thread::sleep_for(100ms);

            rc = pthread_kill(worker_thread_handle, 0);
            if (rc == 0) {
                continue;
            } else if (rc == ESRCH) {
                break;
            } else {
                LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
            }
        }

        worker_thread_.join();
    }

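    // Reset a read block for (re)submission, reusing its buffer's capacity when it is still
    // large enough.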
    void PrepareReadBlock(IoReadBlock* block, uint64_t id) {
        block->pending = false;
        if (block->payload.capacity() >= kUsbReadSize) {
            block->payload.resize(kUsbReadSize);
        } else {
            block->payload = Block(kUsbReadSize);
        }
        block->control.aio_data = static_cast<uint64_t>(TransferId::read(id));
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload.data());
        block->control.aio_nbytes = block->payload.size();
    }

    IoReadBlock CreateReadBlock(uint64_t id) {
        IoReadBlock block;
        PrepareReadBlock(&block, id);
        block.control.aio_rw_flags = 0;
        block.control.aio_lio_opcode = IOCB_CMD_PREAD;
        block.control.aio_reqprio = 0;
        block.control.aio_fildes = read_fd_.get();
        block.control.aio_offset = 0;
        block.control.aio_flags = IOCB_FLAG_RESFD;
        block.control.aio_resfd = worker_event_fd_.get();
        return block;
    }

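    // Reap every AIO completion that is currently available (zero timeout) and dispatch each one
    // to HandleRead or HandleWrite based on its TransferId.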
    void ReadEvents() {
        static constexpr size_t kMaxEvents = kUsbReadQueueDepth + kUsbWriteQueueDepth;
        struct io_event events[kMaxEvents];
        struct timespec timeout = {.tv_sec = 0, .tv_nsec = 0};
        int rc = io_getevents(aio_context_.get(), 0, kMaxEvents, events, &timeout);
        if (rc == -1) {
            HandleError(StringPrintf("io_getevents failed while reading: %s", strerror(errno)));
            return;
        }

        for (int event_idx = 0; event_idx < rc; ++event_idx) {
            auto& event = events[event_idx];
            TransferId id = TransferId::from_value(event.data);

            if (event.res < 0) {
                std::string error =
                        StringPrintf("%s %" PRIu64 " failed with error %s",
                                     id.direction == TransferDirection::READ ? "read" : "write",
                                     id.id, strerror(-event.res));
                HandleError(error);
                return;
            }

            if (id.direction == TransferDirection::READ) {
                HandleRead(id, event.res);
            } else {
                HandleWrite(id);
            }
        }
    }

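    // Record a completed read, then deliver reads to the transport strictly in submission order,
    // since AIO completions can arrive out of order.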
    void HandleRead(TransferId id, int64_t size) {
        uint64_t read_idx = id.id % kUsbReadQueueDepth;
        IoReadBlock* block = &read_requests_[read_idx];
        block->pending = false;
        block->payload.resize(size);

        // Notification for completed reads can be received out of order.
        if (block->id().id != needed_read_id_) {
            LOG(VERBOSE) << "read " << block->id().id << " completed while waiting for "
                         << needed_read_id_;
            return;
        }

        for (uint64_t id = needed_read_id_;; ++id) {
            size_t read_idx = id % kUsbReadQueueDepth;
            IoReadBlock* current_block = &read_requests_[read_idx];
            if (current_block->pending) {
                break;
            }
            ProcessRead(current_block);
            ++needed_read_id_;
        }
    }

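    // Interpret a completed read as either an amessage header or payload data, hand a finished
    // apacket to the transport, and resubmit the block for the next read.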
    void ProcessRead(IoReadBlock* block) {
        if (!block->payload.empty()) {
            if (!incoming_header_.has_value()) {
                CHECK_EQ(sizeof(amessage), block->payload.size());
                amessage& msg = incoming_header_.emplace();
                memcpy(&msg, block->payload.data(), sizeof(msg));
                LOG(DEBUG) << "USB read:" << dump_header(&msg);
                incoming_header_ = msg;
            } else {
                size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
                Block payload = std::move(block->payload);
                CHECK_LE(payload.size(), bytes_left);
                incoming_payload_.append(std::move(payload));
            }

            if (incoming_header_->data_length == incoming_payload_.size()) {
                auto packet = std::make_unique<apacket>();
                packet->msg = *incoming_header_;

                // TODO: Make apacket contain an IOVector so we don't have to coalesce.
                packet->payload = std::move(incoming_payload_).coalesce();
                read_callback_(this, std::move(packet));

                incoming_header_.reset();
                // Reuse the capacity of the incoming payload while we can.
                auto free_block = incoming_payload_.clear();
                if (block->payload.capacity() == 0) {
                    block->payload = std::move(free_block);
                }
            }
        }

        PrepareReadBlock(block, block->id().id + kUsbReadQueueDepth);
        SubmitRead(block);
    }

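    // Submit a single read iocb. EINVAL on the very first submission means the kernel doesn't
    // support AIO on functionfs endpoints; record that so we can fall back to the legacy path.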
    bool SubmitRead(IoReadBlock* block) {
        block->pending = true;
        struct iocb* iocb = &block->control;
        if (io_submit(aio_context_.get(), 1, &iocb) != 1) {
            if (errno == EINVAL && !gFfsAioSupported.has_value()) {
                HandleError("failed to submit first read, AIO on FFS not supported");
                gFfsAioSupported = false;
                return false;
            }

            HandleError(StringPrintf("failed to submit read: %s", strerror(errno)));
            return false;
        }

        gFfsAioSupported = true;
        return true;
    }

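    // Reap a completed write: drop its request entry, free a slot in the write queue, and submit
    // any writes that were waiting for one.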
    void HandleWrite(TransferId id) {
        std::lock_guard<std::mutex> lock(write_mutex_);
        auto it =
                std::find_if(write_requests_.begin(), write_requests_.end(), [id](const auto& req) {
                    return static_cast<uint64_t>(req.id()) == static_cast<uint64_t>(id);
                });
        CHECK(it != write_requests_.end());

        write_requests_.erase(it);
        size_t outstanding_writes = --writes_submitted_;
        LOG(DEBUG) << "USB write: reaped, down to " << outstanding_writes;

        SubmitWrites();
    }

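    // Build a write iocb for the slice [offset, offset + len) of a shared payload; the
    // shared_ptr keeps the underlying Block alive until the write is reaped.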
    IoWriteBlock CreateWriteBlock(std::shared_ptr<Block> payload, size_t offset, size_t len,
                                  uint64_t id) {
        auto block = IoWriteBlock();
        block.payload = std::move(payload);
        block.control.aio_data = static_cast<uint64_t>(TransferId::write(id));
        block.control.aio_rw_flags = 0;
        block.control.aio_lio_opcode = IOCB_CMD_PWRITE;
        block.control.aio_reqprio = 0;
        block.control.aio_fildes = write_fd_.get();
        block.control.aio_buf = reinterpret_cast<uintptr_t>(block.payload->data() + offset);
        block.control.aio_nbytes = len;
        block.control.aio_offset = 0;
        block.control.aio_flags = IOCB_FLAG_RESFD;
        block.control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    IoWriteBlock CreateWriteBlock(Block&& payload, uint64_t id) {
        size_t len = payload.size();
        return CreateWriteBlock(std::make_shared<Block>(std::move(payload)), 0, len, id);
    }

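    // Submit queued writes in order, keeping at most kUsbWriteQueueDepth of them in flight so
    // each reaped write frees a slot for the next.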
    void SubmitWrites() REQUIRES(write_mutex_) {
        if (writes_submitted_ == kUsbWriteQueueDepth) {
            return;
        }

        ssize_t writes_to_submit = std::min(kUsbWriteQueueDepth - writes_submitted_,
                                            write_requests_.size() - writes_submitted_);
        CHECK_GE(writes_to_submit, 0);
        if (writes_to_submit == 0) {
            return;
        }

        struct iocb* iocbs[kUsbWriteQueueDepth];
        for (int i = 0; i < writes_to_submit; ++i) {
            CHECK(!write_requests_[writes_submitted_ + i].pending);
            write_requests_[writes_submitted_ + i].pending = true;
            iocbs[i] = &write_requests_[writes_submitted_ + i].control;
            LOG(VERBOSE) << "submitting write_request " << static_cast<void*>(iocbs[i]);
        }

        writes_submitted_ += writes_to_submit;

        int rc = io_submit(aio_context_.get(), writes_to_submit, iocbs);
        if (rc == -1) {
            HandleError(StringPrintf("failed to submit write requests: %s", strerror(errno)));
            return;
        } else if (rc != writes_to_submit) {
            LOG(FATAL) << "failed to submit all writes: wanted to submit " << writes_to_submit
                       << ", actually submitted " << rc;
        }
    }

    void HandleError(const std::string& error) {
        std::call_once(error_flag_, [&]() {
            error_callback_(this, error);
            if (!stopped_) {
                Stop();
            }
        });
    }

    std::thread monitor_thread_;

    bool worker_started_;
    std::thread worker_thread_;

    std::atomic<bool> stopped_;
    std::promise<void> destruction_notifier_;
    std::once_flag error_flag_;

    unique_fd worker_event_fd_;
    unique_fd monitor_event_fd_;

    ScopedAioContext aio_context_;
    unique_fd control_fd_;
    unique_fd read_fd_;
    unique_fd write_fd_;

    std::optional<amessage> incoming_header_;
    IOVector incoming_payload_;

    std::array<IoReadBlock, kUsbReadQueueDepth> read_requests_;
    IOVector read_data_;

    // ID of the next request that we're going to send out.
    size_t next_read_id_ = 0;

    // ID of the next packet we're waiting for.
    size_t needed_read_id_ = 0;

    std::mutex write_mutex_;
    std::deque<IoWriteBlock> write_requests_ GUARDED_BY(write_mutex_);
    size_t next_write_id_ GUARDED_BY(write_mutex_) = 0;
    size_t writes_submitted_ GUARDED_BY(write_mutex_) = 0;

    static constexpr int kInterruptionSignal = SIGUSR1;
};

void usb_init_legacy();

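// Open the functionfs endpoints, hand them to a fresh UsbFfsConnection, and wait for that
// connection to be destroyed before reopening them. Falls back to the legacy USB implementation
// if AIO on functionfs turns out to be unsupported.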
static void usb_ffs_open_thread() {
    adb_thread_setname("usb ffs open");

    while (true) {
        if (gFfsAioSupported.has_value() && !gFfsAioSupported.value()) {
            LOG(INFO) << "failed to use nonblocking ffs, falling back to legacy";
            return usb_init_legacy();
        }

        unique_fd control;
        unique_fd bulk_out;
        unique_fd bulk_in;
        if (!open_functionfs(&control, &bulk_out, &bulk_in)) {
            std::this_thread::sleep_for(1s);
            continue;
        }

        atransport* transport = new atransport();
        transport->serial = "UsbFfs";
        std::promise<void> destruction_notifier;
        std::future<void> future = destruction_notifier.get_future();
        transport->SetConnection(std::make_unique<UsbFfsConnection>(
                std::move(control), std::move(bulk_out), std::move(bulk_in),
                std::move(destruction_notifier)));
        register_transport(transport);
        future.wait();
    }
}

void usb_init() {
    bool use_nonblocking = android::base::GetBoolProperty(
            "persist.adb.nonblocking_ffs",
            android::base::GetBoolProperty("ro.adb.nonblocking_ffs", true));

    if (use_nonblocking) {
        std::thread(usb_ffs_open_thread).detach();
    } else {
        usb_init_legacy();
    }
}