/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <asyncio/AsyncIO.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "PosixAsyncIO.h"
#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "mtp.h"

namespace {

constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

constexpr unsigned FFS_NUM_EVENTS = 5;

constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
// Note: a POLL_TIMEOUT_MS of 0 would make poll() return immediately (no sleep),
// which causes high CPU usage while waiting for events.
constexpr int32_t POLL_TIMEOUT_MS = 500;

struct timespec ZERO_TIMEOUT = { 0, 0 };

struct mtp_device_status {
    uint16_t wLength;
    uint16_t wCode;
};

} // anonymous namespace

namespace android {

int MtpFfsHandle::getPacketSize(int ffs_fd) {
    struct usb_endpoint_descriptor desc;
    if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
        PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
        return MAX_PACKET_SIZE_HS;
    } else {
        return desc.wMaxPacketSize;
    }
}

MtpFfsHandle::MtpFfsHandle(int controlFd) {
    mControl.reset(controlFd);
    mBatchCancel = android::base::GetBoolProperty("sys.usb.mtp.batchcancel", false);
}

MtpFfsHandle::~MtpFfsHandle() {}

void MtpFfsHandle::closeEndpoints() {
    mIntr.reset();
    mBulkIn.reset();
    mBulkOut.reset();
}

bool MtpFfsHandle::openEndpoints(bool ptp) {
    if (mBulkIn < 0) {
        mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
        if (mBulkIn < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
            return false;
        }
    }

    if (mBulkOut < 0) {
        mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
        if (mBulkOut < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
            return false;
        }
    }

    if (mIntr < 0) {
        mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
        if (mIntr < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
            return false;
        }
    }
    return true;
}

void MtpFfsHandle::advise(int fd) {
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
                POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) != 0)
            PLOG(ERROR) << "Failed to madvise";
    }
    if (posix_fadvise(fd, 0, 0,
            POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) != 0)
        PLOG(ERROR) << "Failed to fadvise";
}

bool MtpFfsHandle::writeDescriptors(bool ptp) {
    return ::android::writeDescriptors(mControl, ptp);
}

void MtpFfsHandle::closeConfig() {
    mControl.reset();
}

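// Transfer up to |len| bytes synchronously over the bulk endpoints by splitting the
// buffer into AIO_BUF_LEN-sized AIO requests (at most AIO_BUFS_MAX per batch) and
// waiting for each batch to complete. When |zero_packet| is set and |len| is a
// multiple of the endpoint packet size, a zero-length packet terminates the transfer.
// Returns the number of bytes transferred, or -1 on error.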
int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
    struct io_event ioevs[AIO_BUFS_MAX];
    size_t total = 0;

    while (total < len) {
        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
        for (int i = 0; i < num_bufs; i++) {
            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
        }
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
        total += ret;
        if (static_cast<size_t>(ret) < this_len) break;
    }

    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
    if (len % packet_size == 0 && zero_packet) {
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
    }

    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
    }
    return total;
}

int MtpFfsHandle::read(void* data, size_t len) {
    // Zero packets are handled by receiveFile()
    return doAsync(data, len, true, false);
}

int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false, true);
}

int MtpFfsHandle::handleEvent() {

    std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
    usb_functionfs_event *event = events.data();
    int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
                events.size() * sizeof(usb_functionfs_event)));
    if (nbytes == -1) {
        return -1;
    }
    int ret = 0;
    for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
        switch (event->type) {
        case FUNCTIONFS_BIND:
        case FUNCTIONFS_ENABLE:
            ret = 0;
            errno = 0;
            break;
        case FUNCTIONFS_UNBIND:
        case FUNCTIONFS_DISABLE:
            errno = ESHUTDOWN;
            ret = -1;
            break;
        case FUNCTIONFS_SETUP:
            if (handleControlRequest(&event->u.setup) == -1)
                ret = -1;
            break;
        case FUNCTIONFS_SUSPEND:
        case FUNCTIONFS_RESUME:
            break;
        default:
            LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
        }
    }
    return ret;
}

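// Handle a control (ep0) SETUP request read from the FunctionFS control fd.
// Class-specific MTP requests are supported: cancel/reset set errno to ECANCELED,
// and GET_DEVICE_STATUS reports either MTP_RESPONSE_OK or a pending transaction
// cancellation (with the stalled endpoint addresses) back to the host.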
int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
    uint8_t type = setup->bRequestType;
    uint8_t code = setup->bRequest;
    uint16_t length = setup->wLength;
    uint16_t index = setup->wIndex;
    uint16_t value = setup->wValue;
    std::vector<char> buf;
    buf.resize(length);

    if (!(type & USB_DIR_IN)) {
        if (::read(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq read data";
        }
    }

    if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
        switch(code) {
            case MTP_REQ_RESET:
            case MTP_REQ_CANCEL:
                errno = ECANCELED;
                return -1;
            // break;
            case MTP_REQ_GET_DEVICE_STATUS:
            {
                if (length < sizeof(struct mtp_device_status) + 4) {
                    errno = EINVAL;
                    return -1;
                }
                struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
                st->wLength = htole16(sizeof(st));
                if (mCanceled) {
                    st->wLength += 4;
                    st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
                    uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
                    endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
                    endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
                    mCanceled = false;
                } else {
                    st->wCode = MTP_RESPONSE_OK;
                }
                length = st->wLength;
                break;
            }
            default:
                LOG(ERROR) << "Unrecognized Mtp class request! " << code;
        }
    } else {
        LOG(ERROR) << "Unrecognized request type " << type;
    }

    if (type & USB_DIR_IN) {
        if (::write(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq write data";
        }
    }
    return 0;
}

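// Open the endpoints, allocate the NUM_IO_BUFS buffer sets used for double
// buffering, create the kernel AIO context and completion eventfd, and register
// the control fd and eventfd for polling.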
int MtpFfsHandle::start(bool ptp) {
    if (!openEndpoints(ptp))
        return -1;

    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
        mIobuf[i].iocb.resize(AIO_BUFS_MAX);
        mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
        mIobuf[i].buf.resize(AIO_BUFS_MAX);
        for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
            mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
            mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
        }
    }

    memset(&mCtx, 0, sizeof(mCtx));
    if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
        PLOG(ERROR) << "unable to setup aio";
        return -1;
    }
    mEventFd.reset(eventfd(0, EFD_NONBLOCK));
    mPollFds[0].fd = mControl;
    mPollFds[0].events = POLLIN;
    mPollFds[1].fd = mEventFd;
    mPollFds[1].events = POLLIN;

    mCanceled = false;
    return 0;
}

void MtpFfsHandle::close() {
    // Join all child threads before destruction
    int count = mChildThreads.size();
    for (int i = 0; i < count; i++) {
        mChildThreads[i].join();
    }
    mChildThreads.clear();

    io_destroy(mCtx);
    closeEndpoints();
    closeConfig();
}

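// Block until at least |min_events| AIO completions have been reaped for |buf|.
// The poll loop also services control (ep0) events, so a host-initiated cancel or
// disconnect is noticed while a transfer is in flight. Returns the total number of
// bytes reported by the completed events, or -1 with errno set on error.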
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
        int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
            PLOG(ERROR) << "Mtp error during poll()";
            return -1;
        }
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                PLOG(ERROR) << "Mtp unable to read eventfd";
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                PLOG(ERROR) << "Mtp error getting events";
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    errno = -events[j].res;
                    PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}

void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    mCanceled = true;
    errno = ECANCELED;
}

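// Cancel the still-outstanding AIO requests in iocb[start..end) and reap their
// completion events so the buffers can be reused. With |is_batch_cancel| set
// (controlled by the sys.usb.mtp.batchcancel property), a single successful
// io_cancel is treated as cancelling the whole batch.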
int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end, bool is_batch_cancel) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
        if (is_batch_cancel && num_events == 1) {
            num_events = end - start;
            break;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overridden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}

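// Fill |buf|'s iocb table with up to AIO_BUFS_MAX requests of at most AIO_BUF_LEN
// bytes each covering |length| bytes, flag each request to notify mEventFd on
// completion, and submit the batch with io_submit. Returns the number of iocbs
// submitted (also stored in buf->actual), or -1 with errno set.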
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
        if (ret != -1) {
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}

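// Receive a file from the host. USB reads (kernel AIO on the bulk-out endpoint) and
// disk writes (POSIX AIO) are double-buffered across the NUM_IO_BUFS buffer sets:
// while one chunk is being read from USB, the previous chunk is written to disk.
// Transfers announced as 0xFFFFFFFF (>= 4 GiB) continue until a short packet arrives.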
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual,
                            mBatchCancel);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                                / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual,
                        mBatchCancel)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}

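// Send a file to the host. The MTP data header is written into the first packet
// together with the beginning of the file, since some hosts do not support
// separating header and data. After that, disk reads (POSIX AIO) and USB writes
// (kernel AIO on the bulk-in endpoint) are double-buffered, and a zero-length
// packet is sent if the last packet was full-sized.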
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;
    int length, num_read;
    unsigned i = 0;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    if (header == NULL) {
        return -1;
    }
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
                false, false /* zlps are handled below */) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                    &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual, false);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error reading from disk";
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}

int MtpFfsHandle::sendEvent(mtp_event me) {
    // Mimic the behavior of f_mtp by sending the event async.
    // Events aren't critical to the connection, so we don't need to check the return value.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;

    std::thread t([this, me]() { return this->doSendEvent(me); });

    // Store the thread object for later joining
    mChildThreads.emplace_back(std::move(t));
    return 0;
}

void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        PLOG(ERROR) << "Mtp error sending event thread!";
    delete[] reinterpret_cast<char*>(me.data);
}

} // namespace android