author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-07 08:04:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-07 08:04:56 -0400
commit     dc92b1f9ab1e1665dbbc56911782358e7f9a49f9
tree       965ccb4a0f2c24a8b24adce415f6506246d07a90  /drivers/char
parent     5e090ed7af10729a396a25df43d69a236e789736
parent     ca16f580a5db7e60bfafe59a50bb133bd3347491
Merge branch 'virtio-next' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio changes from Rusty Russell:
"New workflow: same git trees pulled by linux-next get sent straight to
Linus. Git is awkward at shuffling patches compared with quilt or mq,
but that doesn't happen often once things get into my -next branch."
* 'virtio-next' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (24 commits)
lguest: fix occasional crash in example launcher.
virtio-blk: Disable callback in virtblk_done()
virtio_mmio: Don't attempt to create empty virtqueues
virtio_mmio: fix off by one error allocating queue
drivers/virtio/virtio_pci.c: fix error return code
virtio: don't crash when device is buggy
virtio: remove CONFIG_VIRTIO_RING
virtio: add help to CONFIG_VIRTIO option.
virtio: support reserved vqs
virtio: introduce an API to set affinity for a virtqueue
virtio-ring: move queue_index to vring_virtqueue
virtio_balloon: not EXPERIMENTAL any more.
virtio-balloon: dependency fix
virtio-blk: fix NULL checking in virtblk_alloc_req()
virtio-blk: Add REQ_FLUSH and REQ_FUA support to bio path
virtio-blk: Add bio-based IO path for virtio-blk
virtio: console: fix error handling in init() function
tools: Fix pthread flag for Makefile of trace-agent used by virtio-trace
tools: Add guest trace agent as a user tool
virtio/console: Allocate scatterlist according to the current pipe size
...
Diffstat (limited to 'drivers/char')
 -rw-r--r--  drivers/char/virtio_console.c | 198
 1 file changed, 178 insertions(+), 20 deletions(-)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 060a672ebb7b..8ab9c3d4bf13 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -24,6 +24,8 @@
 #include <linux/err.h>
 #include <linux/freezer.h>
 #include <linux/fs.h>
+#include <linux/splice.h>
+#include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/poll.h>
@@ -474,26 +476,53 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
         return 0;
 }
 
+struct buffer_token {
+        union {
+                void *buf;
+                struct scatterlist *sg;
+        } u;
+        /* If sgpages == 0 then buf is used, else sg is used */
+        unsigned int sgpages;
+};
+
+static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
+{
+        int i;
+        struct page *page;
+
+        for (i = 0; i < nrpages; i++) {
+                page = sg_page(&sg[i]);
+                if (!page)
+                        break;
+                put_page(page);
+        }
+        kfree(sg);
+}
+
 /* Callers must take the port->outvq_lock */
 static void reclaim_consumed_buffers(struct port *port)
 {
-        void *buf;
+        struct buffer_token *tok;
         unsigned int len;
 
         if (!port->portdev) {
                 /* Device has been unplugged. vqs are already gone. */
                 return;
         }
-        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
-                kfree(buf);
+        while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
+                if (tok->sgpages)
+                        reclaim_sg_pages(tok->u.sg, tok->sgpages);
+                else
+                        kfree(tok->u.buf);
+                kfree(tok);
                 port->outvq_full = false;
         }
 }
 
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
-                        bool nonblock)
+static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
+                              int nents, size_t in_count,
+                              struct buffer_token *tok, bool nonblock)
 {
-        struct scatterlist sg[1];
         struct virtqueue *out_vq;
         ssize_t ret;
         unsigned long flags;
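The heart of this hunk is the buffer_token: a tagged union whose sgpages field tells the reclaim path whether it is freeing a plain kmalloc'd buffer (the copying path) or a scatterlist of referenced pages (the new splice path). A minimal userspace sketch of the same discriminated-union pattern; the names here (token, token_free) are hypothetical, with free() standing in for kfree()/put_page():

/* Userspace sketch of the tagged-union token pattern; hypothetical names. */
#include <stdlib.h>

struct token {
        union {
                void *buf;                      /* single copied buffer */
                struct {
                        void **pages;           /* stand-in for scatterlist */
                        unsigned int n;
                } sg;
        } u;
        unsigned int sgpages;                   /* 0 => u.buf, else u.sg */
};

static void token_free(struct token *tok)
{
        if (tok->sgpages) {
                for (unsigned int i = 0; i < tok->u.sg.n; i++)
                        free(tok->u.sg.pages[i]);  /* cf. put_page() */
                free(tok->u.sg.pages);             /* cf. kfree(sg) */
        } else {
                free(tok->u.buf);                  /* cf. kfree(buf) */
        }
        free(tok);
}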
@@ -505,8 +534,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
 
         reclaim_consumed_buffers(port);
 
-        sg_init_one(sg, in_buf, in_count);
-        ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
+        ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
 
         /* Tell Host to go! */
         virtqueue_kick(out_vq);
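The second-to-last argument of virtqueue_add_buf() is an opaque per-buffer cookie that virtqueue_get_buf() hands back once the host has consumed the buffer; this hunk swaps that cookie from the raw data pointer to the token, which is what lets reclaim_consumed_buffers() tell the two buffer kinds apart. Condensed from the hunks above into one round-trip sketch (locking and error handling elided):

/* Round-trip sketch using only calls shown in this patch. */
sg_init_one(sg, in_buf, in_count);
ret = virtqueue_add_buf(out_vq, sg, 1 /* out */, 0 /* in */, tok, GFP_ATOMIC);
virtqueue_kick(out_vq);

/* ... later, under port->outvq_lock, the same cookie comes back: */
while ((tok = virtqueue_get_buf(port->out_vq, &len)))
        /* free tok->u.buf or tok->u.sg depending on tok->sgpages */ ;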
@@ -544,6 +572,37 @@ done:
         return in_count;
 }
 
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+                        bool nonblock)
+{
+        struct scatterlist sg[1];
+        struct buffer_token *tok;
+
+        tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+        if (!tok)
+                return -ENOMEM;
+        tok->sgpages = 0;
+        tok->u.buf = in_buf;
+
+        sg_init_one(sg, in_buf, in_count);
+
+        return __send_to_port(port, sg, 1, in_count, tok, nonblock);
+}
+
+static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
+                          size_t in_count, bool nonblock)
+{
+        struct buffer_token *tok;
+
+        tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
+        if (!tok)
+                return -ENOMEM;
+        tok->sgpages = nents;
+        tok->u.sg = sg;
+
+        return __send_to_port(port, sg, nents, in_count, tok, nonblock);
+}
+
 /*
  * Give out the data that's requested from the buffer that we have
  * queued up.
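send_buf() preserves the old copying behaviour for write(2), while send_pages() takes ownership of a caller-built scatterlist whose pages already carry references. A call-shape sketch; the buffer names are placeholders:

/* write(2) path: 'copy' holds data copied from userspace;
 * reclaim will kfree() it. */
ret = send_buf(port, copy, count, nonblock);

/* splice path: 'sgl.sg' holds referenced pages; reclaim will
 * put_page() each one and kfree() the scatterlist. */
ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);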
@@ -665,6 +724,26 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
         return fill_readbuf(port, ubuf, count, true);
 }
 
+static int wait_port_writable(struct port *port, bool nonblock)
+{
+        int ret;
+
+        if (will_write_block(port)) {
+                if (nonblock)
+                        return -EAGAIN;
+
+                ret = wait_event_freezable(port->waitqueue,
+                                           !will_write_block(port));
+                if (ret < 0)
+                        return ret;
+        }
+        /* Port got hot-unplugged. */
+        if (!port->guest_connected)
+                return -ENODEV;
+
+        return 0;
+}
+
 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                                size_t count, loff_t *offp)
 {
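This helper is the write-readiness gate that the next hunk removes from port_fops_write(), factored out so the new splice path can share it. Its caller-side contract, sketched: 0 means the port is writable and still connected; the -ERESTARTSYS case comes from wait_event_freezable() when a signal arrives and is propagated unchanged:

ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
        return ret;     /* -EAGAIN, -ERESTARTSYS, or -ENODEV */
/* safe to queue data for the host from here */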
@@ -681,18 +760,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
         nonblock = filp->f_flags & O_NONBLOCK;
 
-        if (will_write_block(port)) {
-                if (nonblock)
-                        return -EAGAIN;
-
-                ret = wait_event_freezable(port->waitqueue,
-                                           !will_write_block(port));
-                if (ret < 0)
-                        return ret;
-        }
-        /* Port got hot-unplugged. */
-        if (!port->guest_connected)
-                return -ENODEV;
+        ret = wait_port_writable(port, nonblock);
+        if (ret < 0)
+                return ret;
 
         count = min((size_t)(32 * 1024), count);
 
@@ -725,6 +795,93 @@ out:
         return ret;
 }
 
+struct sg_list {
+        unsigned int n;
+        unsigned int size;
+        size_t len;
+        struct scatterlist *sg;
+};
+
+static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+                      struct splice_desc *sd)
+{
+        struct sg_list *sgl = sd->u.data;
+        unsigned int offset, len;
+
+        if (sgl->n == sgl->size)
+                return 0;
+
+        /* Try lock this page */
+        if (buf->ops->steal(pipe, buf) == 0) {
+                /* Get reference and unlock page for moving */
+                get_page(buf->page);
+                unlock_page(buf->page);
+
+                len = min(buf->len, sd->len);
+                sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
+        } else {
+                /* Failback to copying a page */
+                struct page *page = alloc_page(GFP_KERNEL);
+                char *src = buf->ops->map(pipe, buf, 1);
+                char *dst;
+
+                if (!page)
+                        return -ENOMEM;
+                dst = kmap(page);
+
+                offset = sd->pos & ~PAGE_MASK;
+
+                len = sd->len;
+                if (len + offset > PAGE_SIZE)
+                        len = PAGE_SIZE - offset;
+
+                memcpy(dst + offset, src + buf->offset, len);
+
+                kunmap(page);
+                buf->ops->unmap(pipe, buf, src);
+
+                sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
+        }
+        sgl->n++;
+        sgl->len += len;
+
+        return len;
+}
+
+/* Faster zero-copy write by splicing */
+static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+                                      struct file *filp, loff_t *ppos,
+                                      size_t len, unsigned int flags)
+{
+        struct port *port = filp->private_data;
+        struct sg_list sgl;
+        ssize_t ret;
+        struct splice_desc sd = {
+                .total_len = len,
+                .flags = flags,
+                .pos = *ppos,
+                .u.data = &sgl,
+        };
+
+        ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
+        if (ret < 0)
+                return ret;
+
+        sgl.n = 0;
+        sgl.len = 0;
+        sgl.size = pipe->nrbufs;
+        sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
+        if (unlikely(!sgl.sg))
+                return -ENOMEM;
+
+        sg_init_table(sgl.sg, sgl.size);
+        ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+        if (likely(ret > 0))
+                ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+
+        return ret;
+}
+
 static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
 {
         struct port *port;
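In pipe_to_sg(), the fast path steals the pipe buffer's page outright (buf->ops->steal() returning 0), which is the true zero-copy case; when stealing fails, a fresh page is allocated and the data is memcpy'd, with the copy length clamped so it never crosses a page boundary. That clamp, lifted into a self-contained check (PAGE_SIZE hard-coded to 4096 for illustration):

/* Self-contained check of the copy-fallback clamp used in pipe_to_sg(). */
#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long clamp_len(unsigned long pos, unsigned long want)
{
        unsigned long offset = pos & ~PAGE_MASK;        /* offset in page */

        if (want + offset > PAGE_SIZE)
                return PAGE_SIZE - offset;
        return want;
}

int main(void)
{
        assert(clamp_len(0, 100) == 100);       /* fits in one page */
        assert(clamp_len(4000, 500) == 96);     /* clipped at page end */
        return 0;
}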
@@ -856,6 +1013,7 @@ static const struct file_operations port_fops = {
         .open = port_fops_open,
         .read = port_fops_read,
         .write = port_fops_write,
+        .splice_write = port_fops_splice_write,
         .poll = port_fops_poll,
         .release = port_fops_release,
         .fasync = port_fops_fasync,
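With .splice_write wired into port_fops, a guest process can feed a virtio-serial port straight from a pipe. A minimal userspace sketch; the device path /dev/vport0p1 is an assumption that depends on the guest's port numbering:

/* Userspace sketch: exercising the new splice_write path.
 * The port path /dev/vport0p1 is hypothetical; adjust for your guest. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pipefd[2];
        int port = open("/dev/vport0p1", O_WRONLY);
        ssize_t n;

        if (port < 0 || pipe(pipefd) < 0) {
                perror("setup");
                return 1;
        }

        /* Fill the pipe; splice() then hands these pages to the driver,
         * which tries to steal them (pipe_to_sg) and falls back to a
         * copy when it can't. */
        if (write(pipefd[1], "hello from splice\n", 18) != 18) {
                perror("write");
                return 1;
        }
        n = splice(pipefd[0], NULL, port, NULL, 18, SPLICE_F_MOVE);
        if (n < 0)
                perror("splice");
        return n < 0;
}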