diff options
| author | Sjur Brændeland <sjur.brandeland@stericsson.com> | 2012-12-13 22:16:42 -0500 |
|---|---|---|
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2012-12-17 23:50:44 -0500 |
| commit | 276a3e954cfe4da7c492c9063741f99290d2973e (patch) | |
| tree | b2f697bcb414219a157f837c7183fedfd0eb7daa /drivers | |
| parent | 9a2bdcc85d28506d4e5d4a9618fb133a3f40945d (diff) | |
virtio_console: Merge struct buffer_token into struct port_buffer
Refactoring the splice functionality by unifying the approach for
sending scatter-lists and regular buffers. This simplifies
buffer handling and reduces code size. Splice will now allocate
a port_buffer and send_buf() and free_buf() can always be used
for any buffer.
Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Acked-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/char/virtio_console.c | 129 |
1 file changed, 53 insertions, 76 deletions
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index db244b5b6c8a..548224686963 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -111,6 +111,12 @@ struct port_buffer { | |||
| 111 | size_t len; | 111 | size_t len; |
| 112 | /* offset in the buf from which to consume data */ | 112 | /* offset in the buf from which to consume data */ |
| 113 | size_t offset; | 113 | size_t offset; |
| 114 | |||
| 115 | /* If sgpages == 0 then buf is used */ | ||
| 116 | unsigned int sgpages; | ||
| 117 | |||
| 118 | /* sg is used if sgpages > 0. sg must be the last field in this struct */ | ||
| 119 | struct scatterlist sg[0]; | ||
| 114 | }; | 120 | }; |
| 115 | 121 | ||
| 116 | /* | 122 | /* |
| @@ -338,17 +344,39 @@ static inline bool use_multiport(struct ports_device *portdev) | |||
| 338 | 344 | ||
| 339 | static void free_buf(struct port_buffer *buf) | 345 | static void free_buf(struct port_buffer *buf) |
| 340 | { | 346 | { |
| 347 | unsigned int i; | ||
| 348 | |||
| 341 | kfree(buf->buf); | 349 | kfree(buf->buf); |
| 350 | for (i = 0; i < buf->sgpages; i++) { | ||
| 351 | struct page *page = sg_page(&buf->sg[i]); | ||
| 352 | if (!page) | ||
| 353 | break; | ||
| 354 | put_page(page); | ||
| 355 | } | ||
| 356 | |||
| 342 | kfree(buf); | 357 | kfree(buf); |
| 343 | } | 358 | } |
| 344 | 359 | ||
| 345 | static struct port_buffer *alloc_buf(size_t buf_size) | 360 | static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, |
| 361 | int pages) | ||
| 346 | { | 362 | { |
| 347 | struct port_buffer *buf; | 363 | struct port_buffer *buf; |
| 348 | 364 | ||
| 349 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); | 365 | /* |
| 366 | * Allocate buffer and the sg list. The sg list array is allocated | ||
| 367 | * directly after the port_buffer struct. | ||
| 368 | */ | ||
| 369 | buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages, | ||
| 370 | GFP_KERNEL); | ||
| 350 | if (!buf) | 371 | if (!buf) |
| 351 | goto fail; | 372 | goto fail; |
| 373 | |||
| 374 | buf->sgpages = pages; | ||
| 375 | if (pages > 0) { | ||
| 376 | buf->buf = NULL; | ||
| 377 | return buf; | ||
| 378 | } | ||
| 379 | |||
| 352 | buf->buf = kmalloc(buf_size, GFP_KERNEL); | 380 | buf->buf = kmalloc(buf_size, GFP_KERNEL); |
| 353 | if (!buf->buf) | 381 | if (!buf->buf) |
| 354 | goto free_buf; | 382 | goto free_buf; |
| @@ -478,52 +506,26 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, | |||
| 478 | return 0; | 506 | return 0; |
| 479 | } | 507 | } |
| 480 | 508 | ||
| 481 | struct buffer_token { | ||
| 482 | union { | ||
| 483 | void *buf; | ||
| 484 | struct scatterlist *sg; | ||
| 485 | } u; | ||
| 486 | /* If sgpages == 0 then buf is used, else sg is used */ | ||
| 487 | unsigned int sgpages; | ||
| 488 | }; | ||
| 489 | |||
| 490 | static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages) | ||
| 491 | { | ||
| 492 | int i; | ||
| 493 | struct page *page; | ||
| 494 | |||
| 495 | for (i = 0; i < nrpages; i++) { | ||
| 496 | page = sg_page(&sg[i]); | ||
| 497 | if (!page) | ||
| 498 | break; | ||
| 499 | put_page(page); | ||
| 500 | } | ||
| 501 | kfree(sg); | ||
| 502 | } | ||
| 503 | 509 | ||
| 504 | /* Callers must take the port->outvq_lock */ | 510 | /* Callers must take the port->outvq_lock */ |
| 505 | static void reclaim_consumed_buffers(struct port *port) | 511 | static void reclaim_consumed_buffers(struct port *port) |
| 506 | { | 512 | { |
| 507 | struct buffer_token *tok; | 513 | struct port_buffer *buf; |
| 508 | unsigned int len; | 514 | unsigned int len; |
| 509 | 515 | ||
| 510 | if (!port->portdev) { | 516 | if (!port->portdev) { |
| 511 | /* Device has been unplugged. vqs are already gone. */ | 517 | /* Device has been unplugged. vqs are already gone. */ |
| 512 | return; | 518 | return; |
| 513 | } | 519 | } |
| 514 | while ((tok = virtqueue_get_buf(port->out_vq, &len))) { | 520 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { |
| 515 | if (tok->sgpages) | 521 | free_buf(buf); |
| 516 | reclaim_sg_pages(tok->u.sg, tok->sgpages); | ||
| 517 | else | ||
| 518 | kfree(tok->u.buf); | ||
| 519 | kfree(tok); | ||
| 520 | port->outvq_full = false; | 522 | port->outvq_full = false; |
| 521 | } | 523 | } |
| 522 | } | 524 | } |
| 523 | 525 | ||
| 524 | static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, | 526 | static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, |
| 525 | int nents, size_t in_count, | 527 | int nents, size_t in_count, |
| 526 | struct buffer_token *tok, bool nonblock) | 528 | void *data, bool nonblock) |
| 527 | { | 529 | { |
| 528 | struct virtqueue *out_vq; | 530 | struct virtqueue *out_vq; |
| 529 | int err; | 531 | int err; |
| @@ -536,7 +538,7 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, | |||
| 536 | 538 | ||
| 537 | reclaim_consumed_buffers(port); | 539 | reclaim_consumed_buffers(port); |
| 538 | 540 | ||
| 539 | err = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC); | 541 | err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC); |
| 540 | 542 | ||
| 541 | /* Tell Host to go! */ | 543 | /* Tell Host to go! */ |
| 542 | virtqueue_kick(out_vq); | 544 | virtqueue_kick(out_vq); |
| @@ -574,37 +576,6 @@ done: | |||
| 574 | return in_count; | 576 | return in_count; |
| 575 | } | 577 | } |
| 576 | 578 | ||
| 577 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, | ||
| 578 | bool nonblock) | ||
| 579 | { | ||
| 580 | struct scatterlist sg[1]; | ||
| 581 | struct buffer_token *tok; | ||
| 582 | |||
| 583 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); | ||
| 584 | if (!tok) | ||
| 585 | return -ENOMEM; | ||
| 586 | tok->sgpages = 0; | ||
| 587 | tok->u.buf = in_buf; | ||
| 588 | |||
| 589 | sg_init_one(sg, in_buf, in_count); | ||
| 590 | |||
| 591 | return __send_to_port(port, sg, 1, in_count, tok, nonblock); | ||
| 592 | } | ||
| 593 | |||
| 594 | static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents, | ||
| 595 | size_t in_count, bool nonblock) | ||
| 596 | { | ||
| 597 | struct buffer_token *tok; | ||
| 598 | |||
| 599 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); | ||
| 600 | if (!tok) | ||
| 601 | return -ENOMEM; | ||
| 602 | tok->sgpages = nents; | ||
| 603 | tok->u.sg = sg; | ||
| 604 | |||
| 605 | return __send_to_port(port, sg, nents, in_count, tok, nonblock); | ||
| 606 | } | ||
| 607 | |||
| 608 | /* | 579 | /* |
| 609 | * Give out the data that's requested from the buffer that we have | 580 | * Give out the data that's requested from the buffer that we have |
| 610 | * queued up. | 581 | * queued up. |
| @@ -750,9 +721,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
| 750 | size_t count, loff_t *offp) | 721 | size_t count, loff_t *offp) |
| 751 | { | 722 | { |
| 752 | struct port *port; | 723 | struct port *port; |
| 753 | char *buf; | 724 | struct port_buffer *buf; |
| 754 | ssize_t ret; | 725 | ssize_t ret; |
| 755 | bool nonblock; | 726 | bool nonblock; |
| 727 | struct scatterlist sg[1]; | ||
| 756 | 728 | ||
| 757 | /* Userspace could be out to fool us */ | 729 | /* Userspace could be out to fool us */ |
| 758 | if (!count) | 730 | if (!count) |
| @@ -768,11 +740,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
| 768 | 740 | ||
| 769 | count = min((size_t)(32 * 1024), count); | 741 | count = min((size_t)(32 * 1024), count); |
| 770 | 742 | ||
| 771 | buf = kmalloc(count, GFP_KERNEL); | 743 | buf = alloc_buf(port->out_vq, count, 0); |
| 772 | if (!buf) | 744 | if (!buf) |
| 773 | return -ENOMEM; | 745 | return -ENOMEM; |
| 774 | 746 | ||
| 775 | ret = copy_from_user(buf, ubuf, count); | 747 | ret = copy_from_user(buf->buf, ubuf, count); |
| 776 | if (ret) { | 748 | if (ret) { |
| 777 | ret = -EFAULT; | 749 | ret = -EFAULT; |
| 778 | goto free_buf; | 750 | goto free_buf; |
| @@ -786,13 +758,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | |||
| 786 | * through to the host. | 758 | * through to the host. |
| 787 | */ | 759 | */ |
| 788 | nonblock = true; | 760 | nonblock = true; |
| 789 | ret = send_buf(port, buf, count, nonblock); | 761 | sg_init_one(sg, buf->buf, count); |
| 762 | ret = __send_to_port(port, sg, 1, count, buf, nonblock); | ||
| 790 | 763 | ||
| 791 | if (nonblock && ret > 0) | 764 | if (nonblock && ret > 0) |
| 792 | goto out; | 765 | goto out; |
| 793 | 766 | ||
| 794 | free_buf: | 767 | free_buf: |
| 795 | kfree(buf); | 768 | free_buf(buf); |
| 796 | out: | 769 | out: |
| 797 | return ret; | 770 | return ret; |
| 798 | } | 771 | } |
| @@ -858,6 +831,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
| 858 | struct port *port = filp->private_data; | 831 | struct port *port = filp->private_data; |
| 859 | struct sg_list sgl; | 832 | struct sg_list sgl; |
| 860 | ssize_t ret; | 833 | ssize_t ret; |
| 834 | struct port_buffer *buf; | ||
| 861 | struct splice_desc sd = { | 835 | struct splice_desc sd = { |
| 862 | .total_len = len, | 836 | .total_len = len, |
| 863 | .flags = flags, | 837 | .flags = flags, |
| @@ -869,17 +843,18 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
| 869 | if (ret < 0) | 843 | if (ret < 0) |
| 870 | return ret; | 844 | return ret; |
| 871 | 845 | ||
| 846 | buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); | ||
| 847 | if (!buf) | ||
| 848 | return -ENOMEM; | ||
| 849 | |||
| 872 | sgl.n = 0; | 850 | sgl.n = 0; |
| 873 | sgl.len = 0; | 851 | sgl.len = 0; |
| 874 | sgl.size = pipe->nrbufs; | 852 | sgl.size = pipe->nrbufs; |
| 875 | sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL); | 853 | sgl.sg = buf->sg; |
| 876 | if (unlikely(!sgl.sg)) | ||
| 877 | return -ENOMEM; | ||
| 878 | |||
| 879 | sg_init_table(sgl.sg, sgl.size); | 854 | sg_init_table(sgl.sg, sgl.size); |
| 880 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); | 855 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); |
| 881 | if (likely(ret > 0)) | 856 | if (likely(ret > 0)) |
| 882 | ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true); | 857 | ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); |
| 883 | 858 | ||
| 884 | if (unlikely(ret <= 0)) | 859 | if (unlikely(ret <= 0)) |
| 885 | kfree(sgl.sg); | 860 | kfree(sgl.sg); |
| @@ -1035,6 +1010,7 @@ static const struct file_operations port_fops = { | |||
| 1035 | static int put_chars(u32 vtermno, const char *buf, int count) | 1010 | static int put_chars(u32 vtermno, const char *buf, int count) |
| 1036 | { | 1011 | { |
| 1037 | struct port *port; | 1012 | struct port *port; |
| 1013 | struct scatterlist sg[1]; | ||
| 1038 | 1014 | ||
| 1039 | if (unlikely(early_put_chars)) | 1015 | if (unlikely(early_put_chars)) |
| 1040 | return early_put_chars(vtermno, buf, count); | 1016 | return early_put_chars(vtermno, buf, count); |
| @@ -1043,7 +1019,8 @@ static int put_chars(u32 vtermno, const char *buf, int count) | |||
| 1043 | if (!port) | 1019 | if (!port) |
| 1044 | return -EPIPE; | 1020 | return -EPIPE; |
| 1045 | 1021 | ||
| 1046 | return send_buf(port, (void *)buf, count, false); | 1022 | sg_init_one(sg, buf, count); |
| 1023 | return __send_to_port(port, sg, 1, count, (void *)buf, false); | ||
| 1047 | } | 1024 | } |
| 1048 | 1025 | ||
| 1049 | /* | 1026 | /* |
| @@ -1264,7 +1241,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | |||
| 1264 | 1241 | ||
| 1265 | nr_added_bufs = 0; | 1242 | nr_added_bufs = 0; |
| 1266 | do { | 1243 | do { |
| 1267 | buf = alloc_buf(PAGE_SIZE); | 1244 | buf = alloc_buf(vq, PAGE_SIZE, 0); |
| 1268 | if (!buf) | 1245 | if (!buf) |
| 1269 | break; | 1246 | break; |
| 1270 | 1247 | ||
