/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define TRACE_TAG USB

#include "sysdeps.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <linux/usb/functionfs.h>
#include <sys/eventfd.h>

#include <algorithm>
#include <array>
#include <future>
#include <memory>
#include <mutex>
#include <optional>
#include <vector>

#include <asyncio/AsyncIO.h>

#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/properties.h>
#include <android-base/thread_annotations.h>

#include <adbd/usb.h>

#include "adb_unique_fd.h"
#include "adb_utils.h"
#include "sysdeps/chrono.h"
#include "transport.h"
#include "types.h"

using android::base::StringPrintf;

// We can't find out whether we have support for AIO on ffs endpoints until we submit a read.
static std::optional<bool> gFfsAioSupported;

// Not all USB controllers support operations larger than 16k, so don't go above that.
// Also, each submitted operation does an allocation in the kernel of that size, so we want to
// minimize our queue depth while still maintaining a deep enough queue to keep the USB stack fed.
static constexpr size_t kUsbReadQueueDepth = 8;
static constexpr size_t kUsbReadSize = 4 * PAGE_SIZE;

static constexpr size_t kUsbWriteQueueDepth = 8;
static constexpr size_t kUsbWriteSize = 4 * PAGE_SIZE;

enum class TransferDirection : uint64_t {
    READ = 0,
    WRITE = 1,
};

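// TransferId packs a direction bit and a 63-bit sequence number into the 64-bit aio_data field
// of an iocb, so that a completion reported by io_getevents can be matched back to the read or
// write request that submitted it.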
struct TransferId {
    TransferDirection direction : 1;
    uint64_t id : 63;

    TransferId() : TransferId(TransferDirection::READ, 0) {}

  private:
    TransferId(TransferDirection direction, uint64_t id) : direction(direction), id(id) {}

  public:
    explicit operator uint64_t() const {
        uint64_t result;
        static_assert(sizeof(*this) == sizeof(result));
        memcpy(&result, this, sizeof(*this));
        return result;
    }

    static TransferId read(uint64_t id) { return TransferId(TransferDirection::READ, id); }
    static TransferId write(uint64_t id) { return TransferId(TransferDirection::WRITE, id); }

    static TransferId from_value(uint64_t value) {
        TransferId result;
        memcpy(&result, &value, sizeof(value));
        return result;
    }
};

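// An IoBlock pairs a kernel iocb with the Block that backs its buffer, and tracks whether the
// request is currently in flight.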
struct IoBlock {
    bool pending = false;
    struct iocb control = {};
    std::shared_ptr<Block> payload;

    TransferId id() const { return TransferId::from_value(control.aio_data); }
};

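// RAII wrapper around a kernel AIO context: io_setup on creation, io_destroy on reset or
// destruction.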
struct ScopedAioContext {
    ScopedAioContext() = default;
    ~ScopedAioContext() { reset(); }

    ScopedAioContext(ScopedAioContext&& move) { reset(move.release()); }
    ScopedAioContext(const ScopedAioContext& copy) = delete;

    ScopedAioContext& operator=(ScopedAioContext&& move) {
        reset(move.release());
        return *this;
    }
    ScopedAioContext& operator=(const ScopedAioContext& copy) = delete;

    static ScopedAioContext Create(size_t max_events) {
        aio_context_t ctx = 0;
        if (io_setup(max_events, &ctx) != 0) {
            PLOG(FATAL) << "failed to create aio_context_t";
        }
        ScopedAioContext result;
        result.reset(ctx);
        return result;
    }

    aio_context_t release() {
        aio_context_t result = context_;
        context_ = 0;
        return result;
    }

    void reset(aio_context_t new_context = 0) {
        if (context_ != 0) {
            io_destroy(context_);
        }

        context_ = new_context;
    }

    aio_context_t get() { return context_; }

  private:
    aio_context_t context_ = 0;
};

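// UsbFfsConnection drives the functionfs bulk endpoints with kernel AIO. A monitor thread watches
// the functionfs control endpoint for lifecycle events, while a worker thread keeps queues of read
// and write requests in flight and reaps their completions through an eventfd.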
struct UsbFfsConnection : public Connection {
    UsbFfsConnection(unique_fd* control, unique_fd read, unique_fd write,
                     std::promise<void> destruction_notifier)
        : worker_started_(false),
          stopped_(false),
          destruction_notifier_(std::move(destruction_notifier)),
          control_fd_(control),
          read_fd_(std::move(read)),
          write_fd_(std::move(write)) {
        LOG(INFO) << "UsbFfsConnection constructed";
        worker_event_fd_.reset(eventfd(0, EFD_CLOEXEC));
        if (worker_event_fd_ == -1) {
            PLOG(FATAL) << "failed to create eventfd";
        }

        aio_context_ = ScopedAioContext::Create(kUsbReadQueueDepth + kUsbWriteQueueDepth);
    }

    ~UsbFfsConnection() {
        LOG(INFO) << "UsbFfsConnection being destroyed";
        Stop();
        monitor_thread_.join();

        // We need to explicitly close our file descriptors before we notify our destruction,
        // because the thread listening on the future will immediately try to reopen the endpoint.
        aio_context_.reset();
        read_fd_.reset();
        write_fd_.reset();

        destruction_notifier_.set_value();
    }

    virtual bool Write(std::unique_ptr<apacket> packet) override final {
        LOG(DEBUG) << "USB write: " << dump_header(&packet->msg);
        Block header(sizeof(packet->msg));
        memcpy(header.data(), &packet->msg, sizeof(packet->msg));

        std::lock_guard<std::mutex> lock(write_mutex_);
        write_requests_.push_back(CreateWriteBlock(std::move(header), next_write_id_++));
        if (!packet->payload.empty()) {
            // The kernel attempts to allocate a contiguous block of memory for each write,
            // which can fail if the write is large and the kernel heap is fragmented.
            // Split large writes into smaller chunks to avoid this.
            std::shared_ptr<Block> payload = std::make_shared<Block>(std::move(packet->payload));
            size_t offset = 0;
            size_t len = payload->size();

            while (len > 0) {
                size_t write_size = std::min(kUsbWriteSize, len);
                write_requests_.push_back(
                        CreateWriteBlock(payload, offset, write_size, next_write_id_++));
                len -= write_size;
                offset += write_size;
            }
        }
        SubmitWrites();
        return true;
    }

    virtual void Start() override final { StartMonitor(); }

    virtual void Stop() override final {
        if (stopped_.exchange(true)) {
            return;
        }
        stopped_ = true;
        uint64_t notify = 1;
        ssize_t rc = adb_write(worker_event_fd_.get(), &notify, sizeof(notify));
        if (rc < 0) {
            PLOG(FATAL) << "failed to notify worker eventfd to stop UsbFfsConnection";
        }
        CHECK_EQ(static_cast<size_t>(rc), sizeof(notify));
    }

  private:
    void StartMonitor() {
        // This is a bit of a mess.
        // It's possible for io_submit to end up blocking, if we call it as the endpoint
        // becomes disabled. Work around this by having a monitor thread to listen for functionfs
        // lifecycle events. If we notice an error condition (either we've become disabled, or we
        // were never enabled in the first place), we send interruption signals to the worker thread
        // until it dies, and then report failure to the transport via HandleError, which will
        // eventually result in the transport being destroyed, which will result in UsbFfsConnection
        // being destroyed, which unblocks the open thread and restarts this entire process.
        static std::once_flag handler_once;
        std::call_once(handler_once, []() { signal(kInterruptionSignal, [](int) {}); });

        monitor_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-monitor");

            bool enabled = false;
            bool running = true;
            while (running) {
                adb_pollfd pfd[2] = {
                        {.fd = control_fd_->get(), .events = POLLIN, .revents = 0},
                };

                int rc = TEMP_FAILURE_RETRY(adb_poll(pfd, 2, -1));
                if (rc == -1) {
                    PLOG(FATAL) << "poll on USB control fd failed";
                }

                if (pfd[1].revents) {
                    // We were told to die, continue reading until FUNCTIONFS_UNBIND.
                }

                struct usb_functionfs_event event;
                rc = TEMP_FAILURE_RETRY(adb_read(control_fd_->get(), &event, sizeof(event)));
                if (rc == -1) {
                    PLOG(FATAL) << "failed to read functionfs event";
                } else if (rc == 0) {
                    LOG(WARNING) << "hit EOF on functionfs control fd";
                    break;
                } else if (rc != sizeof(event)) {
                    LOG(FATAL) << "read functionfs event of unexpected size, expected "
                               << sizeof(event) << ", got " << rc;
                }

                LOG(INFO) << "USB event: "
                          << ffs_event_to_string(
                                     static_cast<usb_functionfs_event_type>(event.type));

                switch (event.type) {
                    case FUNCTIONFS_BIND:
                        LOG(FATAL) << "received FUNCTIONFS_BIND after already opened?";
                        break;

                    case FUNCTIONFS_ENABLE:
                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_ENABLE while already enabled?";
                            running = false;
                            break;
                        }

                        enabled = true;
                        StartWorker();
                        break;

                    case FUNCTIONFS_DISABLE:
                        if (!enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_DISABLE while not enabled?";
                        }

                        enabled = false;
                        running = false;
                        break;

                    case FUNCTIONFS_UNBIND:
                        if (enabled) {
                            LOG(WARNING) << "received FUNCTIONFS_UNBIND while still enabled?";
                        }

                        running = false;
                        break;

                    case FUNCTIONFS_SETUP: {
                        read_functionfs_setup(*control_fd_, &event);
                        break;
                    }
                }
            }

            StopWorker();
            HandleError("monitor thread finished");
        });
    }

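    // The worker thread primes the read queue, then blocks on worker_event_fd_, which the kernel
    // signals (via IOCB_FLAG_RESFD on every iocb) whenever an AIO completion is available, and
    // which Stop() writes to when shutting down.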
    void StartWorker() {
        CHECK(!worker_started_);
        worker_started_ = true;
        worker_thread_ = std::thread([this]() {
            adb_thread_setname("UsbFfs-worker");
            for (size_t i = 0; i < kUsbReadQueueDepth; ++i) {
                read_requests_[i] = CreateReadBlock(next_read_id_++);
                if (!SubmitRead(&read_requests_[i])) {
                    return;
                }
            }

            while (!stopped_) {
                uint64_t dummy;
                ssize_t rc = adb_read(worker_event_fd_.get(), &dummy, sizeof(dummy));
                if (rc == -1) {
                    if (errno == EINTR) {
                        // We were interrupted either to stop, or because of a backtrace.
                        // Check stopped_ again to see if we need to exit.
                        continue;
                    }

                    PLOG(FATAL) << "failed to read from eventfd";
                } else if (rc == 0) {
                    LOG(FATAL) << "hit EOF on eventfd";
                }

                ReadEvents();
            }
        });
    }

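    // Repeatedly interrupt the worker thread with kInterruptionSignal (to break it out of any
    // blocking syscall) until pthread_kill reports that it has exited, then join it.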
    void StopWorker() {
        if (!worker_started_) {
            return;
        }

        pthread_t worker_thread_handle = worker_thread_.native_handle();
        while (true) {
            int rc = pthread_kill(worker_thread_handle, kInterruptionSignal);
            if (rc != 0) {
                LOG(ERROR) << "failed to send interruption signal to worker: " << strerror(rc);
                break;
            }

            std::this_thread::sleep_for(100ms);

            rc = pthread_kill(worker_thread_handle, 0);
            if (rc == 0) {
                continue;
            } else if (rc == ESRCH) {
                break;
            } else {
                LOG(ERROR) << "failed to check if worker thread is alive: " << strerror(rc);
            }
        }

        worker_thread_.join();
        worker_started_ = false;
    }

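    // Reset a read request so it can be (re)submitted: allocate a fresh payload buffer and stamp
    // the iocb with the given read id.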
    void PrepareReadBlock(IoBlock* block, uint64_t id) {
        block->pending = false;
        block->payload = std::make_shared<Block>(kUsbReadSize);
        block->control.aio_data = static_cast<uint64_t>(TransferId::read(id));
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data());
        block->control.aio_nbytes = block->payload->size();
    }

    IoBlock CreateReadBlock(uint64_t id) {
        IoBlock block;
        PrepareReadBlock(&block, id);
        block.control.aio_rw_flags = 0;
        block.control.aio_lio_opcode = IOCB_CMD_PREAD;
        block.control.aio_reqprio = 0;
        block.control.aio_fildes = read_fd_.get();
        block.control.aio_offset = 0;
        block.control.aio_flags = IOCB_FLAG_RESFD;
        block.control.aio_resfd = worker_event_fd_.get();
        return block;
    }

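    // Reap all available AIO completions without blocking (io_getevents with a zero timeout) and
    // dispatch each one to the read or write path based on its TransferId.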
    void ReadEvents() {
        static constexpr size_t kMaxEvents = kUsbReadQueueDepth + kUsbWriteQueueDepth;
        struct io_event events[kMaxEvents];
        struct timespec timeout = {.tv_sec = 0, .tv_nsec = 0};
        int rc = io_getevents(aio_context_.get(), 0, kMaxEvents, events, &timeout);
        if (rc == -1) {
            HandleError(StringPrintf("io_getevents failed while reading: %s", strerror(errno)));
            return;
        }

        for (int event_idx = 0; event_idx < rc; ++event_idx) {
            auto& event = events[event_idx];
            TransferId id = TransferId::from_value(event.data);

            if (event.res < 0) {
                std::string error =
                        StringPrintf("%s %" PRIu64 " failed with error %s",
                                     id.direction == TransferDirection::READ ? "read" : "write",
                                     id.id, strerror(-event.res));
                HandleError(error);
                return;
            }

            if (id.direction == TransferDirection::READ) {
                if (!HandleRead(id, event.res)) {
                    return;
                }
            } else {
                HandleWrite(id);
            }
        }
    }

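    // A read completed. Reads can finish out of order, so mark this block done, then process
    // consecutive completed blocks starting from needed_read_id_ to preserve stream ordering.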
    bool HandleRead(TransferId id, int64_t size) {
        uint64_t read_idx = id.id % kUsbReadQueueDepth;
        IoBlock* block = &read_requests_[read_idx];
        block->pending = false;
        block->payload->resize(size);

        // Notification for completed reads can be received out of order.
        if (block->id().id != needed_read_id_) {
            LOG(VERBOSE) << "read " << block->id().id << " completed while waiting for "
                         << needed_read_id_;
            return true;
        }

        for (uint64_t id = needed_read_id_;; ++id) {
            size_t read_idx = id % kUsbReadQueueDepth;
            IoBlock* current_block = &read_requests_[read_idx];
            if (current_block->pending) {
                break;
            }
            if (!ProcessRead(current_block)) {
                return false;
            }
            ++needed_read_id_;
        }

        return true;
    }

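    // Consume one completed read: if no header is pending, the bytes are an amessage header;
    // otherwise they are payload for the packet in progress. Once the full packet has arrived,
    // hand it to the transport and resubmit the block for the next read.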
    bool ProcessRead(IoBlock* block) {
        if (!block->payload->empty()) {
            if (!incoming_header_.has_value()) {
                if (block->payload->size() != sizeof(amessage)) {
                    HandleError("received packet of unexpected length while reading header");
                    return false;
                }
                amessage msg;
                memcpy(&msg, block->payload->data(), sizeof(amessage));
                LOG(DEBUG) << "USB read:" << dump_header(&msg);
                incoming_header_ = msg;
            } else {
                size_t bytes_left = incoming_header_->data_length - incoming_payload_.size();
                if (block->payload->size() > bytes_left) {
                    HandleError("received too many bytes while waiting for payload");
                    return false;
                }
                Block payload = std::move(*block->payload);
                incoming_payload_.append(std::make_unique<Block>(std::move(payload)));
            }

            if (incoming_header_->data_length == incoming_payload_.size()) {
                auto packet = std::make_unique<apacket>();
                packet->msg = *incoming_header_;

                // TODO: Make apacket contain an IOVector so we don't have to coalesce.
                packet->payload = incoming_payload_.coalesce();
                read_callback_(this, std::move(packet));

                incoming_header_.reset();
                incoming_payload_.clear();
            }
        }

        PrepareReadBlock(block, block->id().id + kUsbReadQueueDepth);
        SubmitRead(block);
        return true;
    }

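    // Submit a single read iocb. The first submission doubles as a probe for whether the kernel
    // supports AIO on functionfs endpoints; EINVAL there makes us fall back to the legacy USB
    // implementation.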
    bool SubmitRead(IoBlock* block) {
        block->pending = true;
        struct iocb* iocb = &block->control;
        if (io_submit(aio_context_.get(), 1, &iocb) != 1) {
            if (errno == EINVAL && !gFfsAioSupported.has_value()) {
                HandleError("failed to submit first read, AIO on FFS not supported");
                gFfsAioSupported = false;
                return false;
            }

            HandleError(StringPrintf("failed to submit read: %s", strerror(errno)));
            return false;
        }

        gFfsAioSupported = true;
        return true;
    }

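    // A write completed: drop its request from the queue and top the queue back up.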
    void HandleWrite(TransferId id) {
        std::lock_guard<std::mutex> lock(write_mutex_);
        auto it =
                std::find_if(write_requests_.begin(), write_requests_.end(), [id](const auto& req) {
                    return static_cast<uint64_t>(req->id()) == static_cast<uint64_t>(id);
                });
        CHECK(it != write_requests_.end());

        write_requests_.erase(it);
        size_t outstanding_writes = --writes_submitted_;
        LOG(DEBUG) << "USB write: reaped, down to " << outstanding_writes;

        SubmitWrites();
    }

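    // Build a write iocb whose buffer points into the [offset, offset + len) slice of the shared
    // payload block, so a large packet can be split across several requests without copying.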
    std::unique_ptr<IoBlock> CreateWriteBlock(std::shared_ptr<Block> payload, size_t offset,
                                              size_t len, uint64_t id) {
        auto block = std::make_unique<IoBlock>();
        block->payload = std::move(payload);
        block->control.aio_data = static_cast<uint64_t>(TransferId::write(id));
        block->control.aio_rw_flags = 0;
        block->control.aio_lio_opcode = IOCB_CMD_PWRITE;
        block->control.aio_reqprio = 0;
        block->control.aio_fildes = write_fd_.get();
        block->control.aio_buf = reinterpret_cast<uintptr_t>(block->payload->data() + offset);
        block->control.aio_nbytes = len;
        block->control.aio_offset = 0;
        block->control.aio_flags = IOCB_FLAG_RESFD;
        block->control.aio_resfd = worker_event_fd_.get();
        return block;
    }

    std::unique_ptr<IoBlock> CreateWriteBlock(Block payload, uint64_t id) {
        std::shared_ptr<Block> block = std::make_shared<Block>(std::move(payload));
        size_t len = block->size();
        return CreateWriteBlock(std::move(block), 0, len, id);
    }

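    // Submit pending writes in order, keeping at most kUsbWriteQueueDepth requests in flight;
    // requests stay in write_requests_ until HandleWrite reaps their completions.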
    void SubmitWrites() REQUIRES(write_mutex_) {
        if (writes_submitted_ == kUsbWriteQueueDepth) {
            return;
        }

        ssize_t writes_to_submit = std::min(kUsbWriteQueueDepth - writes_submitted_,
                                            write_requests_.size() - writes_submitted_);
        CHECK_GE(writes_to_submit, 0);
        if (writes_to_submit == 0) {
            return;
        }

        struct iocb* iocbs[kUsbWriteQueueDepth];
        for (int i = 0; i < writes_to_submit; ++i) {
            CHECK(!write_requests_[writes_submitted_ + i]->pending);
            write_requests_[writes_submitted_ + i]->pending = true;
            iocbs[i] = &write_requests_[writes_submitted_ + i]->control;
            LOG(VERBOSE) << "submitting write_request " << static_cast<void*>(iocbs[i]);
        }

        writes_submitted_ += writes_to_submit;

        int rc = io_submit(aio_context_.get(), writes_to_submit, iocbs);
        if (rc == -1) {
            HandleError(StringPrintf("failed to submit write requests: %s", strerror(errno)));
            return;
        } else if (rc != writes_to_submit) {
            LOG(FATAL) << "failed to submit all writes: wanted to submit " << writes_to_submit
                       << ", actually submitted " << rc;
        }
    }

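    // Report a fatal connection error to the transport exactly once, then stop the connection.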
    void HandleError(const std::string& error) {
        std::call_once(error_flag_, [&]() {
            error_callback_(this, error);
            if (!stopped_) {
                Stop();
            }
        });
    }

    std::thread monitor_thread_;

    bool worker_started_;
    std::thread worker_thread_;

    std::atomic<bool> stopped_;
    std::promise<void> destruction_notifier_;
    std::once_flag error_flag_;

    unique_fd worker_event_fd_;

    ScopedAioContext aio_context_;

    // We keep a pointer to the control fd, so that we can reuse it to avoid USB reconfiguration,
    // and still be able to reset it to force a reopen after FUNCTIONFS_UNBIND or running into an
    // unexpected situation.
    unique_fd* control_fd_;
    unique_fd read_fd_;
    unique_fd write_fd_;

    std::optional<amessage> incoming_header_;
    IOVector incoming_payload_;

    std::array<IoBlock, kUsbReadQueueDepth> read_requests_;
    IOVector read_data_;

    // ID of the next read request that we're going to submit.
    size_t next_read_id_ = 0;

    // ID of the next read whose completion we need to process, to preserve ordering.
    size_t needed_read_id_ = 0;

    std::mutex write_mutex_;
    std::deque<std::unique_ptr<IoBlock>> write_requests_ GUARDED_BY(write_mutex_);
    size_t next_write_id_ GUARDED_BY(write_mutex_) = 0;
    size_t writes_submitted_ GUARDED_BY(write_mutex_) = 0;

    static constexpr int kInterruptionSignal = SIGUSR1;
};

void usb_init_legacy();

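// Opens the functionfs endpoints, hands them to a fresh UsbFfsConnection, and blocks until that
// connection is destroyed before looping around to reopen the endpoints. If the AIO probe failed,
// falls back to the legacy USB implementation instead.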
static void usb_ffs_open_thread() {
    adb_thread_setname("usb ffs open");

    unique_fd control;
    unique_fd bulk_out;
    unique_fd bulk_in;

    while (true) {
        if (gFfsAioSupported.has_value() && !gFfsAioSupported.value()) {
            LOG(INFO) << "failed to use nonblocking ffs, falling back to legacy";
            return usb_init_legacy();
        }

        if (!open_functionfs(&control, &bulk_out, &bulk_in)) {
            std::this_thread::sleep_for(1s);
            continue;
        }

        atransport* transport = new atransport();
        transport->serial = "UsbFfs";
        std::promise<void> destruction_notifier;
        std::future<void> future = destruction_notifier.get_future();
        transport->SetConnection(std::make_unique<UsbFfsConnection>(
                &control, std::move(bulk_out), std::move(bulk_in),
                std::move(destruction_notifier)));
        register_transport(transport);
        future.wait();
    }
}

void usb_init() {
    bool use_nonblocking = android::base::GetBoolProperty(
            "persist.adb.nonblocking_ffs",
            android::base::GetBoolProperty("ro.adb.nonblocking_ffs", true));

    if (use_nonblocking) {
        std::thread(usb_ffs_open_thread).detach();
    } else {
        usb_init_legacy();
    }
}