author     Al Viro <viro@zeniv.linux.org.uk>    2015-02-07 00:30:23 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>    2015-02-17 22:23:33 -0500
commit     7fe3976e0f3ab26f8ffd9430d3d2a19a70f2c8d2
tree       3f00451992c3fbe4ba1c21fbfba9852011017b5a
parent     f01d35a15fa04162a58b95970fc01fa70ec9dacd
gadget: switch ep_io_operations to ->read_iter/->write_iter
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 355
1 file changed, 141 insertions(+), 214 deletions(-)
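
Background for the change: the commit drops the gadgetfs endpoint file's read/write and aio_read/aio_write methods in favour of the iov_iter based ->read_iter/->write_iter methods. A minimal sketch of that pattern follows (illustrative only, not taken from this patch; the demo_* names are hypothetical, while iov_iter_count(), copy_to_iter(), copy_from_iter() and the new_sync_read()/new_sync_write() VFS helpers are the in-tree interfaces of this era):

/*
 * Illustrative sketch only -- not part of the patch.  The "demo_" names
 * are hypothetical; the iov_iter helpers and new_sync_read()/new_sync_write()
 * are the kernel interfaces available when this commit was merged.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uio.h>

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	static char msg[] = "hello\n";

	/* copy_to_iter() walks the caller's segments and returns the
	 * number of bytes it managed to copy */
	return copy_to_iter(msg, min(iov_iter_count(to), sizeof(msg)), to);
}

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char buf[64];
	size_t len = min(iov_iter_count(from), sizeof(buf));

	if (copy_from_iter(buf, len, from) != len)
		return -EFAULT;
	return len;		/* pretend the data was consumed */
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= new_sync_read,	/* read(2) via ->read_iter */
	.write		= new_sync_write,	/* write(2) via ->write_iter */
	.read_iter	= demo_read_iter,
	.write_iter	= demo_write_iter,
};

With this shape, read(2)/write(2) are funneled through new_sync_read()/new_sync_write(), while aio and readv()/writev() arrive at the same iter methods with a fully formed iov_iter; that is the consolidation the patch performs for inode.c.
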
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9fbbaa041a31..b825edcbf387 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -363,97 +363,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
 	return value;
 }
 
-
-/* handle a synchronous OUT bulk/intr/iso transfer */
-static ssize_t
-ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
-{
-	struct ep_data		*data = fd->private_data;
-	void			*kbuf;
-	ssize_t			value;
-
-	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-		return value;
-
-	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (usb_endpoint_dir_in(&data->desc)) {
-		if (usb_endpoint_xfer_isoc(&data->desc)) {
-			mutex_unlock(&data->lock);
-			return -EINVAL;
-		}
-		DBG (data->dev, "%s halt\n", data->name);
-		spin_lock_irq (&data->dev->lock);
-		if (likely (data->ep != NULL))
-			usb_ep_set_halt (data->ep);
-		spin_unlock_irq (&data->dev->lock);
-		mutex_unlock(&data->lock);
-		return -EBADMSG;
-	}
-
-	/* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
-
-	value = -ENOMEM;
-	kbuf = kmalloc (len, GFP_KERNEL);
-	if (unlikely (!kbuf))
-		goto free1;
-
-	value = ep_io (data, kbuf, len);
-	VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
-		data->name, len, (int) value);
-	if (value >= 0 && copy_to_user (buf, kbuf, value))
-		value = -EFAULT;
-
-free1:
-	mutex_unlock(&data->lock);
-	kfree (kbuf);
-	return value;
-}
-
-/* handle a synchronous IN bulk/intr/iso transfer */
-static ssize_t
-ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
-{
-	struct ep_data		*data = fd->private_data;
-	void			*kbuf;
-	ssize_t			value;
-
-	if ((value = get_ready_ep (fd->f_flags, data)) < 0)
-		return value;
-
-	/* halt any endpoint by doing a "wrong direction" i/o call */
-	if (!usb_endpoint_dir_in(&data->desc)) {
-		if (usb_endpoint_xfer_isoc(&data->desc)) {
-			mutex_unlock(&data->lock);
-			return -EINVAL;
-		}
-		DBG (data->dev, "%s halt\n", data->name);
-		spin_lock_irq (&data->dev->lock);
-		if (likely (data->ep != NULL))
-			usb_ep_set_halt (data->ep);
-		spin_unlock_irq (&data->dev->lock);
-		mutex_unlock(&data->lock);
-		return -EBADMSG;
-	}
-
-	/* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
-
-	value = -ENOMEM;
-	kbuf = memdup_user(buf, len);
-	if (IS_ERR(kbuf)) {
-		value = PTR_ERR(kbuf);
-		kbuf = NULL;
-		goto free1;
-	}
-
-	value = ep_io (data, kbuf, len);
-	VDEBUG (data->dev, "%s write %zu IN, status %d\n",
-		data->name, len, (int) value);
-free1:
-	mutex_unlock(&data->lock);
-	kfree (kbuf);
-	return value;
-}
-
 static int
 ep_release (struct inode *inode, struct file *fd)
 {
@@ -517,8 +426,8 @@ struct kiocb_priv {
 	struct mm_struct	*mm;
 	struct work_struct	work;
 	void			*buf;
-	const struct iovec	*iv;
-	unsigned long		nr_segs;
+	struct iov_iter		to;
+	const void		*to_free;
 	unsigned		actual;
 };
 
@@ -541,34 +450,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
 	return value;
 }
 
-static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
-{
-	ssize_t			len, total;
-	void			*to_copy;
-	int			i;
-
-	/* copy stuff into user buffers */
-	total = priv->actual;
-	len = 0;
-	to_copy = priv->buf;
-	for (i=0; i < priv->nr_segs; i++) {
-		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
-
-		if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
-			if (len == 0)
-				len = -EFAULT;
-			break;
-		}
-
-		total -= this;
-		len += this;
-		to_copy += this;
-		if (total == 0)
-			break;
-	}
-	return len;
-}
-
 static void ep_user_copy_worker(struct work_struct *work)
 {
 	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -577,14 +458,16 @@ static void ep_user_copy_worker(struct work_struct *work)
 	size_t ret;
 
 	use_mm(mm);
-	ret = ep_copy_to_user(priv);
+	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 	unuse_mm(mm);
+	if (!ret)
+		ret = -EFAULT;
 
 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 	aio_complete(iocb, ret, ret);
 
 	kfree(priv->buf);
-	kfree(priv->iv);
+	kfree(priv->to_free);
 	kfree(priv);
 }
 
@@ -603,9 +486,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 	 * don't need to copy anything to userspace, so we can
 	 * complete the aio request immediately.
 	 */
-	if (priv->iv == NULL || unlikely(req->actual == 0)) {
+	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 		kfree(req->buf);
-		kfree(priv->iv);
+		kfree(priv->to_free);
 		kfree(priv);
 		iocb->private = NULL;
 		/* aio_complete() reports bytes-transferred _and_ faults */
@@ -619,6 +502,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 
 		priv->buf = req->buf;
 		priv->actual = req->actual;
+		INIT_WORK(&priv->work, ep_user_copy_worker);
 		schedule_work(&priv->work);
 	}
 	spin_unlock(&epdata->dev->lock);
@@ -627,45 +511,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 	put_ep(epdata);
 }
 
-static ssize_t
-ep_aio_rwtail(
-	struct kiocb *iocb,
-	char *buf,
-	size_t len,
-	struct ep_data *epdata,
-	const struct iovec *iv,
-	unsigned long nr_segs
-)
+static ssize_t ep_aio(struct kiocb *iocb,
+		      struct kiocb_priv *priv,
+		      struct ep_data *epdata,
+		      char *buf,
+		      size_t len)
 {
-	struct kiocb_priv	*priv;
 	struct usb_request	*req;
 	ssize_t			value;
 
-	priv = kzalloc(sizeof *priv, GFP_KERNEL);
-	if (!priv) {
-		value = -ENOMEM;
-fail:
-		kfree(buf);
-		return value;
-	}
 	iocb->private = priv;
 	priv->iocb = iocb;
-	if (iv) {
-		priv->iv = kmemdup(iv, nr_segs * sizeof(struct iovec),
-				GFP_KERNEL);
-		if (!priv->iv) {
-			kfree(priv);
-			goto fail;
-		}
-	}
-	priv->nr_segs = nr_segs;
-	INIT_WORK(&priv->work, ep_user_copy_worker);
-
-	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
-	if (unlikely(value < 0)) {
-		kfree(priv);
-		goto fail;
-	}
 
 	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 	get_ep(epdata);
@@ -677,76 +533,147 @@ fail:
 	 * allocate or submit those if the host disconnected.
 	 */
 	spin_lock_irq(&epdata->dev->lock);
-	if (likely(epdata->ep)) {
-		req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
-		if (likely(req)) {
-			priv->req = req;
-			req->buf = buf;
-			req->length = len;
-			req->complete = ep_aio_complete;
-			req->context = iocb;
-			value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
-			if (unlikely(0 != value))
-				usb_ep_free_request(epdata->ep, req);
-		} else
-			value = -EAGAIN;
-	} else
-		value = -ENODEV;
-	spin_unlock_irq(&epdata->dev->lock);
-
-	mutex_unlock(&epdata->lock);
-
-	if (unlikely(value)) {
-		kfree(priv->iv);
-		kfree(priv);
-		put_ep(epdata);
-	} else
-		value = -EIOCBQUEUED;
-	return value;
-}
-
-static ssize_t
-ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t o)
-{
-	struct ep_data		*epdata = iocb->ki_filp->private_data;
-	char			*buf;
-
-	if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
-		return -EINVAL;
-
-	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-	if (unlikely(!buf))
-		return -ENOMEM;
-
-	return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
-}
-
-static ssize_t
-ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t o)
-{
-	struct ep_data		*epdata = iocb->ki_filp->private_data;
-	char			*buf;
-	size_t			len = 0;
-	int			i = 0;
-
-	if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
-		return -EINVAL;
-
-	buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
-	if (unlikely(!buf))
-		return -ENOMEM;
-
-	for (i=0; i < nr_segs; i++) {
-		if (unlikely(copy_from_user(&buf[len], iov[i].iov_base,
-				iov[i].iov_len) != 0)) {
-			kfree(buf);
-			return -EFAULT;
-		}
-		len += iov[i].iov_len;
-	}
-	return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0);
+	value = -ENODEV;
+	if (unlikely(epdata->ep))
+		goto fail;
+
+	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+	value = -ENOMEM;
+	if (unlikely(!req))
+		goto fail;
+
+	priv->req = req;
+	req->buf = buf;
+	req->length = len;
+	req->complete = ep_aio_complete;
+	req->context = iocb;
+	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+	if (unlikely(0 != value)) {
+		usb_ep_free_request(epdata->ep, req);
+		goto fail;
+	}
+	spin_unlock_irq(&epdata->dev->lock);
+	return -EIOCBQUEUED;
+
+fail:
+	spin_unlock_irq(&epdata->dev->lock);
+	kfree(priv->to_free);
+	kfree(priv);
+	put_ep(epdata);
+	return value;
+}
+
+static ssize_t
+ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct file *file = iocb->ki_filp;
+	struct ep_data *epdata = file->private_data;
+	size_t len = iov_iter_count(to);
+	ssize_t value;
+	char *buf;
+
+	if ((value = get_ready_ep(file->f_flags, epdata)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (usb_endpoint_dir_in(&epdata->desc)) {
+		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+		    !is_sync_kiocb(iocb)) {
+			mutex_unlock(&epdata->lock);
+			return -EINVAL;
+		}
+		DBG (epdata->dev, "%s halt\n", epdata->name);
+		spin_lock_irq(&epdata->dev->lock);
+		if (likely(epdata->ep != NULL))
+			usb_ep_set_halt(epdata->ep);
+		spin_unlock_irq(&epdata->dev->lock);
+		mutex_unlock(&epdata->lock);
+		return -EBADMSG;
+	}
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		mutex_unlock(&epdata->lock);
+		return -ENOMEM;
+	}
+	if (is_sync_kiocb(iocb)) {
+		value = ep_io(epdata, buf, len);
+		if (value >= 0 && copy_to_iter(buf, value, to))
+			value = -EFAULT;
+	} else {
+		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+		value = -ENOMEM;
+		if (!priv)
+			goto fail;
+		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
+		if (!priv->to_free) {
+			kfree(priv);
+			goto fail;
+		}
+		value = ep_aio(iocb, priv, epdata, buf, len);
+		if (value == -EIOCBQUEUED)
+			buf = NULL;
+	}
+fail:
+	kfree(buf);
+	mutex_unlock(&epdata->lock);
+	return value;
+}
+
+static ssize_t
+ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+	struct ep_data *epdata = file->private_data;
+	size_t len = iov_iter_count(from);
+	ssize_t value;
+	char *buf;
+
+	if ((value = get_ready_ep(file->f_flags, epdata)) < 0)
+		return value;
+
+	/* halt any endpoint by doing a "wrong direction" i/o call */
+	if (!usb_endpoint_dir_in(&epdata->desc)) {
+		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
+		    !is_sync_kiocb(iocb)) {
+			mutex_unlock(&epdata->lock);
+			return -EINVAL;
+		}
+		DBG (epdata->dev, "%s halt\n", epdata->name);
+		spin_lock_irq(&epdata->dev->lock);
+		if (likely(epdata->ep != NULL))
+			usb_ep_set_halt(epdata->ep);
+		spin_unlock_irq(&epdata->dev->lock);
+		mutex_unlock(&epdata->lock);
+		return -EBADMSG;
+	}
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		mutex_unlock(&epdata->lock);
+		return -ENOMEM;
+	}
+
+	if (unlikely(copy_from_iter(buf, len, from) != len)) {
+		value = -EFAULT;
+		goto out;
+	}
+
+	if (is_sync_kiocb(iocb)) {
+		value = ep_io(epdata, buf, len);
+	} else {
+		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
+		value = -ENOMEM;
+		if (priv) {
+			value = ep_aio(iocb, priv, epdata, buf, len);
+			if (value == -EIOCBQUEUED)
+				buf = NULL;
+		}
+	}
+out:
+	kfree(buf);
+	mutex_unlock(&epdata->lock);
+	return value;
 }
 
 /*----------------------------------------------------------------------*/
@@ -756,13 +683,13 @@ static const struct file_operations ep_io_operations = {
 	.owner =	THIS_MODULE,
 	.llseek =	no_llseek,
 
-	.read =		ep_read,
-	.write =	ep_write,
+	.read =		new_sync_read,
+	.write =	new_sync_write,
 	.unlocked_ioctl = ep_ioctl,
 	.release =	ep_release,
 
-	.aio_read =	ep_aio_read,
-	.aio_write =	ep_aio_write,
+	.read_iter =	ep_read_iter,
+	.write_iter =	ep_write_iter,
 };
 
 /* ENDPOINT INITIALIZATION