blob: 5d68890540df20ef2dd531379b88d94fcb1d0152 [file] [log] [blame]
Jerry Zhang487be612016-10-24 12:10:41 -07001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <android-base/logging.h>
18#include <android-base/properties.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070019#include <asyncio/AsyncIO.h>
Jerry Zhang487be612016-10-24 12:10:41 -070020#include <dirent.h>
21#include <errno.h>
22#include <fcntl.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070023#include <memory>
Jerry Zhang487be612016-10-24 12:10:41 -070024#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070027#include <sys/eventfd.h>
Jerry Zhang487be612016-10-24 12:10:41 -070028#include <sys/ioctl.h>
Jerry Zhange9d94422017-01-18 12:03:56 -080029#include <sys/mman.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070030#include <sys/poll.h>
Jerry Zhang487be612016-10-24 12:10:41 -070031#include <sys/stat.h>
32#include <sys/types.h>
33#include <unistd.h>
Jerry Zhang487be612016-10-24 12:10:41 -070034
Jerry Zhangdf69dd32017-05-03 17:17:49 -070035#include "PosixAsyncIO.h"
Jerry Zhang69b74502017-10-02 16:26:37 -070036#include "MtpDescriptors.h"
Jerry Zhang487be612016-10-24 12:10:41 -070037#include "MtpFfsHandle.h"
Jerry Zhangcc9d0fd2017-01-27 10:29:59 -080038#include "mtp.h"
Jerry Zhang487be612016-10-24 12:10:41 -070039
namespace {

// Number of iocbs in one aio submission batch, and the size of each buffer.
constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

// Max usb_functionfs_event structs drained from ep0 per read in handleEvent().
constexpr unsigned FFS_NUM_EVENTS = 5;

// Largest contiguous chunk moved per pipeline iteration (128 * 16384 = 2 MiB).
constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

// MTP encodes file lengths in 32 bits; files >= 4 GiB are sent as 0xFFFFFFFF.
constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
// Note: a POLL_TIMEOUT_MS of 0 would make poll() return immediately
// (i.e. no sleep), which would cause high CPU usage.
constexpr int32_t POLL_TIMEOUT_MS = 500;

// Zero timeout so io_getevents() never blocks (see waitEvents()).
struct timespec ZERO_TIMEOUT = { 0, 0 };

// Header layout of the MTP_REQ_GET_DEVICE_STATUS control response.
struct mtp_device_status {
    uint16_t wLength;
    uint16_t wCode;
};

} // anonymous namespace
62
63namespace android {
64
// Returns wMaxPacketSize of the endpoint behind |ffs_fd|, falling back to
// the high-speed bulk maximum if the descriptor ioctl fails.
int MtpFfsHandle::getPacketSize(int ffs_fd) {
    struct usb_endpoint_descriptor desc;
    if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
        PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
        return MAX_PACKET_SIZE_HS;
    } else {
        return desc.wMaxPacketSize;
    }
}
74
Jerry Zhang63dac452017-12-06 15:19:36 -080075MtpFfsHandle::MtpFfsHandle(int controlFd) {
76 mControl.reset(controlFd);
Ray Chibd3a6492021-06-01 16:40:33 +080077 mBatchCancel = android::base::GetBoolProperty("sys.usb.mtp.batchcancel", false);
Jerry Zhang63dac452017-12-06 15:19:36 -080078}
Jerry Zhang487be612016-10-24 12:10:41 -070079
80MtpFfsHandle::~MtpFfsHandle() {}
81
// Closes the interrupt and bulk endpoint fds (ep0/control stays open).
void MtpFfsHandle::closeEndpoints() {
    mIntr.reset();
    mBulkIn.reset();
    mBulkOut.reset();
}
87
Jerry Zhang63dac452017-12-06 15:19:36 -080088bool MtpFfsHandle::openEndpoints(bool ptp) {
Jerry Zhangdf69dd32017-05-03 17:17:49 -070089 if (mBulkIn < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -080090 mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
Jerry Zhangdf69dd32017-05-03 17:17:49 -070091 if (mBulkIn < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -080092 PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
Jerry Zhangdf69dd32017-05-03 17:17:49 -070093 return false;
94 }
95 }
96
97 if (mBulkOut < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -080098 mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
Jerry Zhangdf69dd32017-05-03 17:17:49 -070099 if (mBulkOut < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -0800100 PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700101 return false;
102 }
103 }
104
105 if (mIntr < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -0800106 mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700107 if (mIntr < 0) {
Jerry Zhang63dac452017-12-06 15:19:36 -0800108 PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700109 return false;
110 }
111 }
112 return true;
113}
114
// Advises the kernel that the transfer buffers and the file |fd| will be
// accessed sequentially, to improve readahead/paging behavior. Failures
// are logged but otherwise ignored (the hints are best-effort).
void MtpFfsHandle::advise(int fd) {
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
                POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) != 0)
            PLOG(ERROR) << "Failed to madvise";
    }
    if (posix_fadvise(fd, 0, 0,
            POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) != 0)
        PLOG(ERROR) << "Failed to fadvise";
}
125
Jerry Zhang63dac452017-12-06 15:19:36 -0800126bool MtpFfsHandle::writeDescriptors(bool ptp) {
127 return ::android::writeDescriptors(mControl, ptp);
Jerry Zhang487be612016-10-24 12:10:41 -0700128}
129
// Closes the FunctionFS control (ep0) fd.
void MtpFfsHandle::closeConfig() {
    mControl.reset();
}
133
Jerry Zhang297912b2018-05-11 11:29:54 -0700134int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
135 struct io_event ioevs[AIO_BUFS_MAX];
136 size_t total = 0;
137
138 while (total < len) {
139 size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
140 int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
141 for (int i = 0; i < num_bufs; i++) {
142 mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
143 }
144 int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
145 if (ret < 0) return -1;
146 ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
147 if (ret < 0) return -1;
148 total += ret;
149 if (static_cast<size_t>(ret) < this_len) break;
Jerry Zhang487be612016-10-24 12:10:41 -0700150 }
Jerry Zhang297912b2018-05-11 11:29:54 -0700151
152 int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
153 if (len % packet_size == 0 && zero_packet) {
154 int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
155 if (ret < 0) return -1;
156 ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
157 if (ret < 0) return -1;
158 }
159
160 for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
161 mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
162 }
163 return total;
Jerry Zhang487be612016-10-24 12:10:41 -0700164}
165
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700166int MtpFfsHandle::read(void* data, size_t len) {
Jerry Zhang297912b2018-05-11 11:29:54 -0700167 // Zero packets are handled by receiveFile()
168 return doAsync(data, len, true, false);
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700169}
170
// Writes |len| bytes from |data| to the bulk-in endpoint; doAsync appends a
// zero-length packet when |len| is a multiple of the packet size.
int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false, true);
}
174
// Drains pending FunctionFS events from the control fd and reacts to each.
// Returns -1 with errno set when the function was disabled/unbound
// (ESHUTDOWN) or a setup request signalled cancellation; 0 otherwise.
int MtpFfsHandle::handleEvent() {

    std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
    usb_functionfs_event *event = events.data();
    int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
                events.size() * sizeof(usb_functionfs_event)));
    if (nbytes == -1) {
        return -1;
    }
    int ret = 0;
    // Process every whole event that was read.
    for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
        switch (event->type) {
        case FUNCTIONFS_BIND:
        case FUNCTIONFS_ENABLE:
            ret = 0;
            errno = 0;
            break;
        case FUNCTIONFS_UNBIND:
        case FUNCTIONFS_DISABLE:
            errno = ESHUTDOWN;
            ret = -1;
            break;
        case FUNCTIONFS_SETUP:
            // Control request on ep0; may flag a host-initiated cancel.
            if (handleControlRequest(&event->u.setup) == -1)
                ret = -1;
            break;
        case FUNCTIONFS_SUSPEND:
        case FUNCTIONFS_RESUME:
            break;
        default:
            LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
        }
    }
    return ret;
}
210
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700211int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
212 uint8_t type = setup->bRequestType;
213 uint8_t code = setup->bRequest;
214 uint16_t length = setup->wLength;
215 uint16_t index = setup->wIndex;
216 uint16_t value = setup->wValue;
217 std::vector<char> buf;
218 buf.resize(length);
Jerry Zhang487be612016-10-24 12:10:41 -0700219
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700220 if (!(type & USB_DIR_IN)) {
221 if (::read(mControl, buf.data(), length) != length) {
222 PLOG(ERROR) << "Mtp error ctrlreq read data";
223 }
224 }
225
226 if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
227 switch(code) {
228 case MTP_REQ_RESET:
229 case MTP_REQ_CANCEL:
230 errno = ECANCELED;
James Weid97d79c2018-09-25 23:24:27 +0800231 return -1;
232 // break;
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700233 case MTP_REQ_GET_DEVICE_STATUS:
234 {
235 if (length < sizeof(struct mtp_device_status) + 4) {
236 errno = EINVAL;
237 return -1;
238 }
239 struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
240 st->wLength = htole16(sizeof(st));
241 if (mCanceled) {
242 st->wLength += 4;
243 st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
244 uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
245 endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
246 endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
247 mCanceled = false;
248 } else {
249 st->wCode = MTP_RESPONSE_OK;
250 }
251 length = st->wLength;
252 break;
253 }
254 default:
255 LOG(ERROR) << "Unrecognized Mtp class request! " << code;
256 }
257 } else {
258 LOG(ERROR) << "Unrecognized request type " << type;
259 }
260
261 if (type & USB_DIR_IN) {
262 if (::write(mControl, buf.data(), length) != length) {
263 PLOG(ERROR) << "Mtp error ctrlreq write data";
264 }
265 }
266 return 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700267}
268
Jerry Zhang63dac452017-12-06 15:19:36 -0800269int MtpFfsHandle::start(bool ptp) {
270 if (!openEndpoints(ptp))
Jerry Zhangcc9d0fd2017-01-27 10:29:59 -0800271 return -1;
Jerry Zhangcc9d0fd2017-01-27 10:29:59 -0800272
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700273 for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
274 mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
275 mIobuf[i].iocb.resize(AIO_BUFS_MAX);
276 mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
277 mIobuf[i].buf.resize(AIO_BUFS_MAX);
278 for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
279 mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
280 mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
Jerry Zhangcc9d0fd2017-01-27 10:29:59 -0800281 }
282 }
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700283
284 memset(&mCtx, 0, sizeof(mCtx));
285 if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
286 PLOG(ERROR) << "unable to setup aio";
287 return -1;
288 }
289 mEventFd.reset(eventfd(0, EFD_NONBLOCK));
290 mPollFds[0].fd = mControl;
291 mPollFds[0].events = POLLIN;
292 mPollFds[1].fd = mEventFd;
293 mPollFds[1].events = POLLIN;
294
295 mCanceled = false;
Jerry Zhangb4f54262017-02-02 18:14:33 -0800296 return 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700297}
298
Jerry Zhang487be612016-10-24 12:10:41 -0700299void MtpFfsHandle::close() {
Shruti Bihani50bf46a2023-07-13 09:19:08 +0000300 auto timeout = std::chrono::seconds(2);
301 std::unique_lock lk(m);
302 cv.wait_for(lk, timeout ,[this]{return child_threads==0;});
303
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700304 io_destroy(mCtx);
Jerry Zhang487be612016-10-24 12:10:41 -0700305 closeEndpoints();
Jerry Zhang63dac452017-12-06 15:19:36 -0800306 closeConfig();
Jerry Zhang487be612016-10-24 12:10:41 -0700307}
308
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700309int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
310 int *counter) {
311 int num_events = 0;
312 int ret = 0;
313 int error = 0;
Jerry Zhangc9cbf982017-04-14 15:26:53 -0700314
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700315 while (num_events < min_events) {
James Weif2388b32018-12-18 17:39:58 +0800316 if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700317 PLOG(ERROR) << "Mtp error during poll()";
318 return -1;
319 }
320 if (mPollFds[0].revents & POLLIN) {
321 mPollFds[0].revents = 0;
322 if (handleEvent() == -1) {
323 error = errno;
324 }
325 }
326 if (mPollFds[1].revents & POLLIN) {
327 mPollFds[1].revents = 0;
328 uint64_t ev_cnt = 0;
329
330 if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
331 PLOG(ERROR) << "Mtp unable to read eventfd";
332 error = errno;
333 continue;
334 }
335
336 // It's possible that io_getevents will return more events than the eventFd reported,
337 // since events may appear in the time between the calls. In this case, the eventFd will
338 // show up as readable next iteration, but there will be fewer or no events to actually
339 // wait for. Thus we never want io_getevents to block.
340 int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
341 if (this_events == -1) {
342 PLOG(ERROR) << "Mtp error getting events";
343 error = errno;
344 }
345 // Add up the total amount of data and find errors on the way.
346 for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
347 if (events[j].res < 0) {
348 errno = -events[j].res;
349 PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
350 error = errno;
351 }
352 ret += events[j].res;
353 }
354 num_events += this_events;
355 if (counter)
356 *counter += this_events;
357 }
358 if (error) {
359 errno = error;
360 ret = -1;
361 break;
362 }
363 }
364 return ret;
365}
366
// Device-side cancel: stalls both bulk endpoints (a zero-length read on the
// in ep / write on the out ep is expected to fail with EBADMSG when the
// stall takes effect) and flags the cancel so the next
// MTP_REQ_GET_DEVICE_STATUS reports it. Leaves errno set to ECANCELED.
void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    mCanceled = true;
    errno = ECANCELED;
}
376
// Cancels the outstanding aio requests iocb[start, end) and reaps their
// completion events. With |is_batch_cancel| set, only the first request is
// explicitly cancelled and the remainder are assumed to complete/cancel
// with it. Returns 0 if all requests were accounted for, -1 (errno EIO)
// otherwise.
int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end, bool is_batch_cancel) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
        // Batch mode: one successful cancel stands in for the whole range.
        if (is_batch_cancel && num_events == 1) {
            num_events = end - start;
            break;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    // Reap completion events for everything we cancelled.
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    // Drain the eventfd count corresponding to the reaped events.
    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overriden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}
419
// Splits |length| bytes across |buf|'s iocb table in AIO_BUF_LEN slices and
// submits them as one io_submit() batch on |fd| (reads when |read| is set,
// writes otherwise). Completions are signalled via mEventFd (IOCB_FLAG_RESFD).
// Sets buf->actual to the number of iocbs submitted and returns that count,
// or -1 with errno set on a submit failure.
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
        if (ret != -1) {
            // Partial submission: report as an I/O error.
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}
446
// Receives a file from the host over the bulk-out endpoint and writes it to
// mfr.fd, double-buffering (NUM_IO_BUFS) so the USB read of one chunk
// overlaps the POSIX-aio disk write of the previous one. Files >= 4 GiB
// arrive with length 0xFFFFFFFF and are read until a short packet. Returns
// 0 on success, -1 with errno set on error (cancelling the transaction on
// disk-write failure).
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    // Wait failed (e.g. host cancel): drop the rest of the batch.
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual,
                            mBatchCancel);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                            / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                // Cancel everything queued after the short read.
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual,
                        mBatchCancel)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}
567
Jerry Zhang487be612016-10-24 12:10:41 -0700568int MtpFfsHandle::sendFile(mtp_file_range mfr) {
569 uint64_t file_length = mfr.length;
570 uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
571 file_length + sizeof(mtp_data_header));
Jerry Zhang44180302017-02-03 16:31:31 -0800572 uint64_t offset = mfr.offset;
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700573 int packet_size = getPacketSize(mBulkIn);
Jerry Zhang487be612016-10-24 12:10:41 -0700574
Jerry Zhang44180302017-02-03 16:31:31 -0800575 // If file_length is larger than a size_t, truncating would produce the wrong comparison.
576 // Instead, promote the left side to 64 bits, then truncate the small result.
577 int init_read_len = std::min(
578 static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);
Jerry Zhang487be612016-10-24 12:10:41 -0700579
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700580 advise(mfr.fd);
Jerry Zhange9d94422017-01-18 12:03:56 -0800581
Jerry Zhang487be612016-10-24 12:10:41 -0700582 struct aiocb aio;
583 aio.aio_fildes = mfr.fd;
584 struct aiocb *aiol[] = {&aio};
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700585 int ret = 0;
586 int length, num_read;
587 unsigned i = 0;
588 struct io_event ioevs[AIO_BUFS_MAX];
589 bool error = false;
590 bool has_write = false;
Jerry Zhang487be612016-10-24 12:10:41 -0700591
592 // Send the header data
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700593 mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
Ashish Kumar Guptaa84d6e72023-11-08 09:20:30 +0000594 if (header == NULL) {
595 return -1;
596 }
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700597 header->length = htole32(given_length);
598 header->type = htole16(2); // data packet
599 header->command = htole16(mfr.command);
600 header->transaction_id = htole32(mfr.transaction_id);
Jerry Zhang487be612016-10-24 12:10:41 -0700601
602 // Some hosts don't support header/data separation even though MTP allows it
603 // Handle by filling first packet with initial file data
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700604 if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
Jerry Zhang487be612016-10-24 12:10:41 -0700605 sizeof(mtp_data_header), init_read_len, offset))
606 != init_read_len) return -1;
Jerry Zhang297912b2018-05-11 11:29:54 -0700607 if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
608 false, false /* zlps are handled below */) == -1)
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700609 return -1;
Jerry Zhang487be612016-10-24 12:10:41 -0700610 file_length -= init_read_len;
611 offset += init_read_len;
Jerry Zhang54107562017-05-15 11:54:19 -0700612 ret = init_read_len + sizeof(mtp_data_header);
Jerry Zhang487be612016-10-24 12:10:41 -0700613
Jerry Zhang487be612016-10-24 12:10:41 -0700614 // Break down the file into pieces that fit in buffers
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700615 while(file_length > 0 || has_write) {
616 if (file_length > 0) {
617 // Queue up a read from disk.
618 length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
619 aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
620 aio_read(&aio);
Jerry Zhang487be612016-10-24 12:10:41 -0700621 }
622
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700623 if (has_write) {
624 // Wait for usb write. Cancel unwritten portion if there's an error.
625 int num_events = 0;
626 if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
627 &num_events) != ret) {
628 error = true;
629 cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
Ray Chibd3a6492021-06-01 16:40:33 +0800630 mIobuf[(i-1)%NUM_IO_BUFS].actual, false);
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700631 }
632 has_write = false;
Jerry Zhang7063c932017-04-04 15:06:10 -0700633 }
634
Jerry Zhang487be612016-10-24 12:10:41 -0700635 if (file_length > 0) {
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700636 // Wait for the previous read to finish
637 aio_suspend(aiol, 1, nullptr);
638 num_read = aio_return(&aio);
639 if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
640 errno = num_read == -1 ? aio_error(&aio) : EIO;
641 PLOG(ERROR) << "Mtp error reading from disk";
642 cancelTransaction();
643 return -1;
644 }
645
646 file_length -= num_read;
647 offset += num_read;
648
649 if (error) {
650 return -1;
651 }
652
653 // Queue up a write to usb.
654 if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
655 return -1;
656 }
657 has_write = true;
658 ret = num_read;
Jerry Zhang487be612016-10-24 12:10:41 -0700659 }
660
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700661 i = (i + 1) % NUM_IO_BUFS;
Jerry Zhang487be612016-10-24 12:10:41 -0700662 }
663
Jerry Zhangc9cbf982017-04-14 15:26:53 -0700664 if (ret % packet_size == 0) {
Jerry Zhang487be612016-10-24 12:10:41 -0700665 // If the last packet wasn't short, send a final empty packet
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700666 if (write(mIobuf[0].bufs.data(), 0) != 0) {
Jerry Zhangc9cbf982017-04-14 15:26:53 -0700667 return -1;
668 }
Jerry Zhang487be612016-10-24 12:10:41 -0700669 }
Jerry Zhang487be612016-10-24 12:10:41 -0700670 return 0;
671}
672
// Sends an MTP event on the interrupt endpoint from a detached worker
// thread. The event payload is copied first, so the caller's buffer need
// not outlive the call; close() waits for outstanding workers via
// child_threads/cv. Always returns 0.
int MtpFfsHandle::sendEvent(mtp_event me) {
    // Mimic the behavior of f_mtp by sending the event async.
    // Events aren't critical to the connection, so we don't need to check the return value.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;

    // Count the worker so close() can wait for it to finish.
    std::unique_lock lk(m);
    child_threads++;
    lk.unlock();

    std::thread t([this, me]() { return this->doSendEvent(me); });
    t.detach();
    return 0;
}
688
// Worker for sendEvent(): writes the event to the interrupt endpoint, frees
// the buffer copied by sendEvent(), and signals close() that this thread
// has finished.
void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        PLOG(ERROR) << "Mtp error sending event thread!";
    delete[] reinterpret_cast<char*>(me.data);

    // Decrement the worker count and wake a close() waiting on it.
    std::unique_lock lk(m);
    child_threads--;
    lk.unlock();
    cv.notify_one();
}
701
702} // namespace android
703