| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-20 11:37:04 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-20 11:37:05 -0500 |
| commit | b7dfde956daee23f4439d0c8562a5e38b43e79d9 (patch) | |
| tree | 2ed71fb5c5eac6957fd1e1ad0a67be6c3282167a /drivers | |
| parent | 03c850ec327c42a97e44c448b75983e12da417d9 (diff) | |
| parent | 1b6370463e88b0c1c317de16d7b962acc1dab4f2 (diff) | |
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio update from Rusty Russell:
"Some nice cleanups, and even a patch my wife did as a "live" demo for
Latinoware 2012.
There's a slightly non-trivial merge in virtio-net, as we cleaned up
the virtio add_buf interface while DaveM accepted the mq virtio-net
patches."
* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
virtio_console: Add support for remoteproc serial
virtio_console: Merge struct buffer_token into struct port_buffer
virtio: add drv_to_virtio to make code clearly
virtio: use dev_to_virtio wrapper in virtio
virtio-mmio: Fix irq parsing in command line parameter
virtio_console: Free buffers from out-queue upon close
virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
virtio_console: Use kmalloc instead of kzalloc
virtio_console: Free buffer if splice fails
virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
virtio: console: don't rely on virtqueue_add_buf() returning capacity.
virtio_net: don't rely on virtqueue_add_buf() returning capacity.
virtio-net: remove unused skb_vnet_hdr->num_sg field
virtio-net: correct capacity math on ring full
virtio: move queue_index and num_free fields into core struct virtqueue.
...
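Editor's note on the API change running through the list above: virtqueue_add_buf() now returns 0 on success (or a negative errno such as -ENOSPC or -ENOMEM) instead of the remaining queue capacity, and callers read capacity from the new vq->num_free field. A minimal sketch of a caller under the new contract — the helper name and surrounding logic are illustrative, not from this merge:

```c
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical helper: queue one out-buffer under the post-cleanup API. */
static int queue_one_buf(struct virtqueue *vq, void *buf, size_t len)
{
        struct scatterlist sg[1];
        int err;

        sg_init_one(sg, buf, len);

        /* 0 now means success; there is no positive "capacity" return. */
        err = virtqueue_add_buf(vq, sg, 1, 0, buf, GFP_ATOMIC);
        if (err)
                return err;

        /* Capacity is read from the core virtqueue, not the return value. */
        if (vq->num_free == 0)
                pr_debug("out-queue is now full\n");

        virtqueue_kick(vq);
        return 0;
}
```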
Diffstat (limited to 'drivers')
| -rw-r--r-- | drivers/char/virtio_console.c | 329 |
| -rw-r--r-- | drivers/lguest/core.c | 2 |
| -rw-r--r-- | drivers/net/virtio_net.c | 48 |
| -rw-r--r-- | drivers/rpmsg/virtio_rpmsg_bus.c | 6 |
| -rw-r--r-- | drivers/scsi/virtio_scsi.c | 24 |
| -rw-r--r-- | drivers/virtio/virtio.c | 30 |
| -rw-r--r-- | drivers/virtio/virtio_balloon.c | 7 |
| -rw-r--r-- | drivers/virtio/virtio_mmio.c | 30 |
| -rw-r--r-- | drivers/virtio/virtio_pci.c | 20 |
| -rw-r--r-- | drivers/virtio/virtio_ring.c | 46 |
10 files changed, 322 insertions, 220 deletions
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 90493d4ead1f..c594cb16c37b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
 #include "../tty/hvc/hvc_console.h"
 
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
 /*
  * This is a global struct for storing common data for all the devices
  * this driver handles.
@@ -111,6 +115,21 @@ struct port_buffer {
         size_t len;
         /* offset in the buf from which to consume data */
         size_t offset;
+
+        /* DMA address of buffer */
+        dma_addr_t dma;
+
+        /* Device we got DMA memory from */
+        struct device *dev;
+
+        /* List of pending dma buffers to free */
+        struct list_head list;
+
+        /* If sgpages == 0 then buf is used */
+        unsigned int sgpages;
+
+        /* sg is used if spages > 0. sg must be the last in is struct */
+        struct scatterlist sg[0];
 };
 
 /*
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
         return false;
 }
 
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+        return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
 static inline bool use_multiport(struct ports_device *portdev)
 {
         /*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
         return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
 }
 
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
 {
-        kfree(buf->buf);
+        unsigned int i;
+
+        for (i = 0; i < buf->sgpages; i++) {
+                struct page *page = sg_page(&buf->sg[i]);
+                if (!page)
+                        break;
+                put_page(page);
+        }
+
+        if (!buf->dev) {
+                kfree(buf->buf);
+        } else if (is_rproc_enabled) {
+                unsigned long flags;
+
+                /* dma_free_coherent requires interrupts to be enabled. */
+                if (!can_sleep) {
+                        /* queue up dma-buffers to be freed later */
+                        spin_lock_irqsave(&dma_bufs_lock, flags);
+                        list_add_tail(&buf->list, &pending_free_dma_bufs);
+                        spin_unlock_irqrestore(&dma_bufs_lock, flags);
+                        return;
+                }
+                dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+                /* Release device refcnt and allow it to be freed */
+                put_device(buf->dev);
+        }
+
         kfree(buf);
 }
 
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+        unsigned long flags;
+        struct port_buffer *buf, *tmp;
+        LIST_HEAD(tmp_list);
+
+        if (list_empty(&pending_free_dma_bufs))
+                return;
+
+        /* Create a copy of the pending_free_dma_bufs while holding the lock */
+        spin_lock_irqsave(&dma_bufs_lock, flags);
+        list_cut_position(&tmp_list, &pending_free_dma_bufs,
+                          pending_free_dma_bufs.prev);
+        spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+        /* Release the dma buffers, without irqs enabled */
+        list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+                list_del(&buf->list);
+                free_buf(buf, true);
+        }
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+                                     int pages)
 {
         struct port_buffer *buf;
 
-        buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+        reclaim_dma_bufs();
+
+        /*
+         * Allocate buffer and the sg list. The sg list array is allocated
+         * directly after the port_buffer struct.
+         */
+        buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+                      GFP_KERNEL);
         if (!buf)
                 goto fail;
-        buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+        buf->sgpages = pages;
+        if (pages > 0) {
+                buf->dev = NULL;
+                buf->buf = NULL;
+                return buf;
+        }
+
+        if (is_rproc_serial(vq->vdev)) {
+                /*
+                 * Allocate DMA memory from ancestor. When a virtio
+                 * device is created by remoteproc, the DMA memory is
+                 * associated with the grandparent device:
+                 * vdev => rproc => platform-dev.
+                 * The code here would have been less quirky if
+                 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+                 * in dma-coherent.c
+                 */
+                if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+                        goto free_buf;
+                buf->dev = vq->vdev->dev.parent->parent;
+
+                /* Increase device refcnt to avoid freeing it */
+                get_device(buf->dev);
+                buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+                                              GFP_KERNEL);
+        } else {
+                buf->dev = NULL;
+                buf->buf = kmalloc(buf_size, GFP_KERNEL);
+        }
+
         if (!buf->buf)
                 goto free_buf;
         buf->len = 0;
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 
         ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
         virtqueue_kick(vq);
+        if (!ret)
+                ret = vq->num_free;
         return ret;
 }
 
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
                 port->stats.bytes_discarded += buf->len - buf->offset;
                 if (add_inbuf(port->in_vq, buf) < 0) {
                         err++;
-                        free_buf(buf);
+                        free_buf(buf, false);
                 }
                 port->inbuf = NULL;
                 buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
         vq = portdev->c_ovq;
 
         sg_init_one(sg, &cpkt, sizeof(cpkt));
-        if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+        if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
                 virtqueue_kick(vq);
                 while (!virtqueue_get_buf(vq, &len))
                         cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
         return 0;
 }
 
-struct buffer_token {
-        union {
-                void *buf;
-                struct scatterlist *sg;
-        } u;
-        /* If sgpages == 0 then buf is used, else sg is used */
-        unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
-        int i;
-        struct page *page;
-
-        for (i = 0; i < nrpages; i++) {
-                page = sg_page(&sg[i]);
-                if (!page)
-                        break;
-                put_page(page);
-        }
-        kfree(sg);
-}
 
 /* Callers must take the port->outvq_lock */
 static void reclaim_consumed_buffers(struct port *port)
 {
-        struct buffer_token *tok;
+        struct port_buffer *buf;
         unsigned int len;
 
         if (!port->portdev) {
                 /* Device has been unplugged. vqs are already gone. */
                 return;
         }
-        while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
-                if (tok->sgpages)
-                        reclaim_sg_pages(tok->u.sg, tok->sgpages);
-                else
-                        kfree(tok->u.buf);
-                kfree(tok);
+        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+                free_buf(buf, false);
                 port->outvq_full = false;
         }
 }
 
 static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
                               int nents, size_t in_count,
-                              struct buffer_token *tok, bool nonblock)
+                              void *data, bool nonblock)
 {
         struct virtqueue *out_vq;
-        ssize_t ret;
+        int err;
         unsigned long flags;
         unsigned int len;
 
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 
         reclaim_consumed_buffers(port);
 
-        ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+        err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
 
         /* Tell Host to go! */
         virtqueue_kick(out_vq);
 
-        if (ret < 0) {
+        if (err) {
                 in_count = 0;
                 goto done;
         }
 
-        if (ret == 0)
+        if (out_vq->num_free == 0)
                 port->outvq_full = true;
 
         if (nonblock)
@@ -572,37 +662,6 @@ done:
         return in_count;
 }
 
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
-                        bool nonblock)
-{
-        struct scatterlist sg[1];
-        struct buffer_token *tok;
-
-        tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-        if (!tok)
-                return -ENOMEM;
-        tok->sgpages = 0;
-        tok->u.buf = in_buf;
-
-        sg_init_one(sg, in_buf, in_count);
-
-        return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
-                          size_t in_count, bool nonblock)
-{
-        struct buffer_token *tok;
-
-        tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-        if (!tok)
-                return -ENOMEM;
-        tok->sgpages = nents;
-        tok->u.sg = sg;
-
-        return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
 /*
  * Give out the data that's requested from the buffer that we have
  * queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                                size_t count, loff_t *offp)
 {
         struct port *port;
-        char *buf;
+        struct port_buffer *buf;
         ssize_t ret;
         bool nonblock;
+        struct scatterlist sg[1];
 
         /* Userspace could be out to fool us */
         if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
         count = min((size_t)(32 * 1024), count);
 
-        buf = kmalloc(count, GFP_KERNEL);
+        buf = alloc_buf(port->out_vq, count, 0);
         if (!buf)
                 return -ENOMEM;
 
-        ret = copy_from_user(buf, ubuf, count);
+        ret = copy_from_user(buf->buf, ubuf, count);
         if (ret) {
                 ret = -EFAULT;
                 goto free_buf;
@@ -784,13 +844,14 @@
          * through to the host.
          */
         nonblock = true;
-        ret = send_buf(port, buf, count, nonblock);
+        sg_init_one(sg, buf->buf, count);
+        ret = __send_to_port(port, sg, 1, count, buf, nonblock);
 
         if (nonblock && ret > 0)
                 goto out;
 
 free_buf:
-        kfree(buf);
+        free_buf(buf, true);
 out:
         return ret;
 }
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
         struct port *port = filp->private_data;
         struct sg_list sgl;
         ssize_t ret;
+        struct port_buffer *buf;
         struct splice_desc sd = {
                 .total_len = len,
                 .flags = flags,
@@ -863,22 +925,34 @@
                 .u.data = &sgl,
         };
 
+        /*
+         * Rproc_serial does not yet support splice. To support splice
+         * pipe_to_sg() must allocate dma-buffers and copy content from
+         * regular pages to dma pages. And alloc_buf and free_buf must
+         * support allocating and freeing such a list of dma-buffers.
+         */
+        if (is_rproc_serial(port->out_vq->vdev))
+                return -EINVAL;
+
         ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
         if (ret < 0)
                 return ret;
 
+        buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+        if (!buf)
+                return -ENOMEM;
+
         sgl.n = 0;
         sgl.len = 0;
         sgl.size = pipe->nrbufs;
-        sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
-        if (unlikely(!sgl.sg))
-                return -ENOMEM;
-
+        sgl.sg = buf->sg;
         sg_init_table(sgl.sg, sgl.size);
         ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
         if (likely(ret > 0))
-                ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+                ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 
+        if (unlikely(ret <= 0))
+                free_buf(buf, true);
         return ret;
 }
 
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
         reclaim_consumed_buffers(port);
         spin_unlock_irq(&port->outvq_lock);
 
+        reclaim_dma_bufs();
         /*
          * Locks aren't necessary here as a port can't be opened after
          * unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
 static int put_chars(u32 vtermno, const char *buf, int count)
 {
         struct port *port;
+        struct scatterlist sg[1];
 
         if (unlikely(early_put_chars))
                 return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
         if (!port)
                 return -EPIPE;
 
-        return send_buf(port, (void *)buf, count, false);
+        sg_init_one(sg, buf, count);
+        return __send_to_port(port, sg, 1, count, (void *)buf, false);
 }
 
 /*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
                 return;
 
         vdev = port->portdev->vdev;
-        if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+        /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+        if (!is_rproc_serial(vdev) &&
+            virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
                 hvc_resize(port->cons.hvc, port->cons.ws);
 }
 
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 
         nr_added_bufs = 0;
         do {
-                buf = alloc_buf(PAGE_SIZE);
+                buf = alloc_buf(vq, PAGE_SIZE, 0);
                 if (!buf)
                         break;
 
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
                 ret = add_inbuf(vq, buf);
                 if (ret < 0) {
                         spin_unlock_irq(lock);
                        free_buf(buf, true);
                         break;
                 }
                 nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
                 goto free_device;
         }
 
-        /*
-         * If we're not using multiport support, this has to be a console port
-         */
-        if (!use_multiport(port->portdev)) {
+        if (is_rproc_serial(port->portdev->vdev))
+                /*
+                 * For rproc_serial assume remote processor is connected.
+                 * rproc_serial does not want the console port, only
+                 * the generic port implementation.
+                 */
+                port->host_connected = true;
+        else if (!use_multiport(port->portdev)) {
+                /*
+                 * If we're not using multiport support,
+                 * this has to be a console port.
+                 */
                 err = init_port_console(port);
                 if (err)
                         goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 
 free_inbufs:
         while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-                free_buf(buf);
+                free_buf(buf, true);
 free_device:
         device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
 
         /* Remove buffers we queued up for the Host to send us data in. */
         while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-                free_buf(buf);
+                free_buf(buf, true);
+
+        /* Free pending buffers from the out-queue. */
+        while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+                free_buf(buf, true);
 }
 
 /*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
                 if (add_inbuf(portdev->c_ivq, buf) < 0) {
                         dev_warn(&portdev->vdev->dev,
                                  "Error adding buffer to queue\n");
-                        free_buf(buf);
+                        free_buf(buf, false);
                 }
         }
         spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
                 return;
 
         while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-                free_buf(buf);
+                free_buf(buf, true);
 
         while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-                free_buf(buf);
+                free_buf(buf, true);
 }
 
 /*
@@ -1882,11 +1974,15 @@ static int virtcons_probe(struct virtio_device *vdev)
 
         multiport = false;
         portdev->config.max_nr_ports = 1;
-        if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
-                              offsetof(struct virtio_console_config,
-                                       max_nr_ports),
-                              &portdev->config.max_nr_ports) == 0)
+
+        /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+        if (!is_rproc_serial(vdev) &&
+            virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+                              offsetof(struct virtio_console_config,
+                                       max_nr_ports),
+                              &portdev->config.max_nr_ports) == 0) {
                 multiport = true;
+        }
 
         err = init_vqs(portdev);
         if (err < 0) {
@@ -1996,6 +2092,16 @@ static unsigned int features[] = {
         VIRTIO_CONSOLE_F_MULTIPORT,
 };
 
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+        { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+        { 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
 #ifdef CONFIG_PM
 static int virtcons_freeze(struct virtio_device *vdev)
 {
@@ -2080,6 +2186,20 @@ static struct virtio_driver virtio_console = {
 #endif
 };
 
+/*
+ * virtio_rproc_serial refers to __devinit function which causes
+ * section mismatch warnings. So use __refdata to silence warnings.
+ */
+static struct virtio_driver __refdata virtio_rproc_serial = {
+        .feature_table = rproc_serial_features,
+        .feature_table_size = ARRAY_SIZE(rproc_serial_features),
+        .driver.name = "virtio_rproc_serial",
+        .driver.owner = THIS_MODULE,
+        .id_table = rproc_serial_id_table,
+        .probe = virtcons_probe,
+        .remove = virtcons_remove,
+};
+
 static int __init init(void)
 {
         int err;
@@ -2104,7 +2224,15 @@ static int __init init(void)
                 pr_err("Error %d registering virtio driver\n", err);
                 goto free;
         }
+        err = register_virtio_driver(&virtio_rproc_serial);
+        if (err < 0) {
+                pr_err("Error %d registering virtio rproc serial driver\n",
+                       err);
+                goto unregister;
+        }
         return 0;
+unregister:
+        unregister_virtio_driver(&virtio_console);
 free:
         if (pdrvdata.debugfs_dir)
                 debugfs_remove_recursive(pdrvdata.debugfs_dir);
@@ -2114,7 +2242,10 @@ free:
 
 static void __exit fini(void)
 {
+        reclaim_dma_bufs();
+
         unregister_virtio_driver(&virtio_console);
+        unregister_virtio_driver(&virtio_rproc_serial);
 
         class_destroy(pdrvdata.class);
         if (pdrvdata.debugfs_dir)
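A note on the free_buf()/reclaim_dma_bufs() machinery added above: dma_free_coherent() cannot run with interrupts disabled, so frees requested from atomic context are parked on a list and released later from a sleepable path. A generic sketch of that defer-and-reclaim pattern, with illustrative names (my_buf, defer_free and reclaim_deferred are ours, not the driver's):

```c
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_buf {
        struct list_head list;
        struct device *dev;
        size_t size;
        void *vaddr;
        dma_addr_t dma;
};

static DEFINE_SPINLOCK(defer_lock);
static LIST_HEAD(deferred_bufs);

/* Safe from atomic context: just park the buffer for later. */
static void defer_free(struct my_buf *b)
{
        unsigned long flags;

        spin_lock_irqsave(&defer_lock, flags);
        list_add_tail(&b->list, &deferred_bufs);
        spin_unlock_irqrestore(&defer_lock, flags);
}

/* Call from process context, where dma_free_coherent() is allowed. */
static void reclaim_deferred(void)
{
        struct my_buf *b, *tmp;
        unsigned long flags;
        LIST_HEAD(local);

        /* Detach the whole list under the lock, then free it unlocked. */
        spin_lock_irqsave(&defer_lock, flags);
        list_splice_init(&deferred_bufs, &local);
        spin_unlock_irqrestore(&defer_lock, flags);

        list_for_each_entry_safe(b, tmp, &local, list) {
                list_del(&b->list);
                dma_free_coherent(b->dev, b->size, b->vaddr, b->dma);
                kfree(b);
        }
}
```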
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index b5fdcb78a75b..a5ebc0083d87 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
          * eventfd (ie. the appropriate virtqueue thread)?
          */
         if (!send_notify_to_eventfd(cpu)) {
-                /* OK, we tell the main Laucher. */
+                /* OK, we tell the main Launcher. */
                 if (put_user(cpu->pending_notify, user))
                         return -EFAULT;
                 return sizeof(cpu->pending_notify);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 68d64f0313ea..a6fcf15adc4f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -130,7 +130,6 @@ struct skb_vnet_hdr {
                 struct virtio_net_hdr hdr;
                 struct virtio_net_hdr_mrg_rxbuf mhdr;
         };
-        unsigned int num_sg;
 };
 
 struct padded_vnet_hdr {
@@ -530,10 +529,10 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
                         err = add_recvbuf_small(rq, gfp);
 
                 oom = err == -ENOMEM;
-                if (err < 0)
+                if (err)
                         break;
                 ++rq->num;
-        } while (err > 0);
+        } while (rq->vq->num_free);
         if (unlikely(rq->num > rq->max))
                 rq->max = rq->num;
         virtqueue_kick(rq->vq);
@@ -640,10 +639,10 @@ static int virtnet_open(struct net_device *dev)
         return 0;
 }
 
-static unsigned int free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq)
 {
         struct sk_buff *skb;
-        unsigned int len, tot_sgs = 0;
+        unsigned int len;
         struct virtnet_info *vi = sq->vq->vdev->priv;
         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 
@@ -655,10 +654,8 @@ static unsigned int free_old_xmit_skbs(struct send_queue *sq)
                 stats->tx_packets++;
                 u64_stats_update_end(&stats->tx_syncp);
 
-                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                 dev_kfree_skb_any(skb);
         }
-        return tot_sgs;
 }
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -666,6 +663,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
         struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
         struct virtnet_info *vi = sq->vq->vdev->priv;
+        unsigned num_sg;
 
         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
@@ -704,8 +702,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
         else
                 sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
 
-        hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-        return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
+        num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+        return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
                                  0, skb, GFP_ATOMIC);
 }
 
@@ -714,28 +712,20 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct virtnet_info *vi = netdev_priv(dev);
         int qnum = skb_get_queue_mapping(skb);
         struct send_queue *sq = &vi->sq[qnum];
-        int capacity;
+        int err;
 
         /* Free up any pending old buffers before queueing new ones. */
         free_old_xmit_skbs(sq);
 
         /* Try to transmit */
-        capacity = xmit_skb(sq, skb);
+        err = xmit_skb(sq, skb);
 
-        /* This can happen with OOM and indirect buffers. */
-        if (unlikely(capacity < 0)) {
-                if (likely(capacity == -ENOMEM)) {
-                        if (net_ratelimit())
-                                dev_warn(&dev->dev,
-                                         "TXQ (%d) failure: out of memory\n",
-                                         qnum);
-                } else {
-                        dev->stats.tx_fifo_errors++;
-                        if (net_ratelimit())
-                                dev_warn(&dev->dev,
-                                         "Unexpected TXQ (%d) failure: %d\n",
-                                         qnum, capacity);
-                }
+        /* This should not happen! */
+        if (unlikely(err)) {
+                dev->stats.tx_fifo_errors++;
+                if (net_ratelimit())
+                        dev_warn(&dev->dev,
+                                 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
                 dev->stats.tx_dropped++;
                 kfree_skb(skb);
                 return NETDEV_TX_OK;
@@ -748,12 +738,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         /* Apparently nice girls don't return TX_BUSY; stop the queue
          * before it gets out of hand. Naturally, this wastes entries. */
-        if (capacity < 2+MAX_SKB_FRAGS) {
+        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                 netif_stop_subqueue(dev, qnum);
                 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                         /* More just got used, free them then recheck. */
-                        capacity += free_old_xmit_skbs(sq);
-                        if (capacity >= 2+MAX_SKB_FRAGS) {
+                        free_old_xmit_skbs(sq);
+                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                 netif_start_subqueue(dev, qnum);
                                 virtqueue_disable_cb(sq->vq);
                         }
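The 2+MAX_SKB_FRAGS threshold in start_xmit() above reserves worst-case room: one descriptor for the virtio header, one for the skb's linear data, and up to MAX_SKB_FRAGS page fragments. With the capacity return value gone, the test reads sq->vq->num_free directly; a hypothetical helper expressing the same check:

```c
#include <linux/skbuff.h>
#include <linux/virtio.h>

/* Illustrative only: can the ring still take a maximally fragmented skb? */
static bool tx_ring_low(const struct virtqueue *vq)
{
        /* 1 header descriptor + 1 linear part + MAX_SKB_FRAGS fragments */
        return vq->num_free < 2 + MAX_SKB_FRAGS;
}
```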
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 1859f71372e2..027096fe6a12 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -764,7 +764,7 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
 
         /* add message to the remote processor's virtqueue */
         err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
-        if (err < 0) {
+        if (err) {
                 /*
                  * need to reclaim the buffer here, otherwise it's lost
                  * (memory won't leak, but rpmsg won't use it again for TX).
@@ -776,8 +776,6 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
 
         /* tell the remote processor it has a pending message to read */
         virtqueue_kick(vrp->svq);
-
-        err = 0;
 out:
         mutex_unlock(&vrp->tx_lock);
         return err;
@@ -980,7 +978,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 
                 err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
                                         GFP_KERNEL);
-                WARN_ON(err < 0); /* sanity check; this can't really happen */
+                WARN_ON(err); /* sanity check; this can't really happen */
         }
 
         /* suppress "tx-complete" interrupts */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index dd8dc27fa32c..74ab67a169ec 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
                                struct virtio_scsi_event_node *event_node)
 {
-        int ret;
+        int err;
         struct scatterlist sg;
         unsigned long flags;
 
@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 
         spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 
-        ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
-        if (ret >= 0)
+        err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+                                GFP_ATOMIC);
+        if (!err)
                 virtqueue_kick(vscsi->event_vq.vq);
 
         spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
 
-        return ret;
+        return err;
 }
 
 static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
@@ -410,22 +411,23 @@
 {
         unsigned int out_num, in_num;
         unsigned long flags;
-        int ret;
+        int err;
+        bool needs_kick = false;
 
         spin_lock_irqsave(&tgt->tgt_lock, flags);
         virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
 
         spin_lock(&vq->vq_lock);
-        ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+        err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
         spin_unlock(&tgt->tgt_lock);
-        if (ret >= 0)
-                ret = virtqueue_kick_prepare(vq->vq);
+        if (!err)
+                needs_kick = virtqueue_kick_prepare(vq->vq);
 
         spin_unlock_irqrestore(&vq->vq_lock, flags);
 
-        if (ret > 0)
+        if (needs_kick)
                 virtqueue_notify(vq->vq);
-        return ret;
+        return err;
 }
 
 static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
@@ -467,7 +469,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 
         if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
                               sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-                              GFP_ATOMIC) >= 0)
+                              GFP_ATOMIC) == 0)
                 ret = 0;
         else
                 mempool_free(cmd, virtscsi_cmd_pool);
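The virtio-scsi hunk above also illustrates the split-kick idiom: virtqueue_kick_prepare() is cheap and must run under the virtqueue lock, while the potentially expensive virtqueue_notify() exit to the host happens only after the lock is dropped. A condensed sketch of the idiom (the helper name is ours):

```c
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

/* Hypothetical helper condensing the locking pattern used above. */
static int add_buf_and_kick(struct virtqueue *vq, spinlock_t *lock,
                            struct scatterlist *sg, void *data)
{
        bool needs_kick = false;
        unsigned long flags;
        int err;

        spin_lock_irqsave(lock, flags);
        err = virtqueue_add_buf(vq, sg, 1, 0, data, GFP_ATOMIC);
        if (!err)
                needs_kick = virtqueue_kick_prepare(vq);  /* cheap, locked */
        spin_unlock_irqrestore(lock, flags);

        if (needs_kick)
                virtqueue_notify(vq);  /* possibly a heavy exit; unlocked */
        return err;
}
```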
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 809b0de59c09..ee59b74768d9 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
 static ssize_t device_show(struct device *_d,
                            struct device_attribute *attr, char *buf)
 {
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+        struct virtio_device *dev = dev_to_virtio(_d);
         return sprintf(buf, "0x%04x\n", dev->id.device);
 }
 static ssize_t vendor_show(struct device *_d,
                            struct device_attribute *attr, char *buf)
 {
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+        struct virtio_device *dev = dev_to_virtio(_d);
         return sprintf(buf, "0x%04x\n", dev->id.vendor);
 }
 static ssize_t status_show(struct device *_d,
                            struct device_attribute *attr, char *buf)
 {
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+        struct virtio_device *dev = dev_to_virtio(_d);
         return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static ssize_t modalias_show(struct device *_d,
                              struct device_attribute *attr, char *buf)
 {
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+        struct virtio_device *dev = dev_to_virtio(_d);
         return sprintf(buf, "virtio:d%08Xv%08X\n",
                        dev->id.device, dev->id.vendor);
 }
 static ssize_t features_show(struct device *_d,
                              struct device_attribute *attr, char *buf)
 {
-        struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+        struct virtio_device *dev = dev_to_virtio(_d);
         unsigned int i;
         ssize_t len = 0;
 
@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
 static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 {
         unsigned int i;
-        struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+        struct virtio_device *dev = dev_to_virtio(_dv);
         const struct virtio_device_id *ids;
 
-        ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+        ids = drv_to_virtio(_dr)->id_table;
         for (i = 0; ids[i].device; i++)
                 if (virtio_id_match(dev, &ids[i]))
                         return 1;
@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
 
 static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
 {
-        struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+        struct virtio_device *dev = dev_to_virtio(_dv);
 
         return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
                               dev->id.device, dev->id.vendor);
@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                          unsigned int fbit)
 {
         unsigned int i;
-        struct virtio_driver *drv = container_of(vdev->dev.driver,
-                                                 struct virtio_driver, driver);
+        struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
 
         for (i = 0; i < drv->feature_table_size; i++)
                 if (drv->feature_table[i] == fbit)
@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
 static int virtio_dev_probe(struct device *_d)
 {
         int err, i;
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-        struct virtio_driver *drv = container_of(dev->dev.driver,
-                                                 struct virtio_driver, driver);
+        struct virtio_device *dev = dev_to_virtio(_d);
+        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
         u32 device_features;
 
         /* We have a driver! */
@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)
 
 static int virtio_dev_remove(struct device *_d)
 {
-        struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-        struct virtio_driver *drv = container_of(dev->dev.driver,
-                                                 struct virtio_driver, driver);
+        struct virtio_device *dev = dev_to_virtio(_d);
+        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 
         drv->remove(dev);
 
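For context, dev_to_virtio() and drv_to_virtio(), which this file now uses throughout, are thin container_of() wrappers added earlier in the series (see the "use dev_to_virtio wrapper" and "add drv_to_virtio" commits above). Their assumed shape — the exact form in include/linux/virtio.h may be a macro rather than an inline:

```c
static inline struct virtio_device *dev_to_virtio(struct device *_dev)
{
        return container_of(_dev, struct virtio_device, dev);
}

static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
{
        return container_of(drv, struct virtio_driver, driver);
}
```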
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2a70558b36ea..d19fe3e323b4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -139,10 +139,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
                 struct page *page = balloon_page_enqueue(vb_dev_info);
 
                 if (!page) {
-                        if (printk_ratelimit())
-                                dev_printk(KERN_INFO, &vb->vdev->dev,
-                                           "Out of puff! Can't get %u pages\n",
-                                           VIRTIO_BALLOON_PAGES_PER_PAGE);
+                        dev_info_ratelimited(&vb->vdev->dev,
+                                             "Out of puff! Can't get %u pages\n",
+                                             VIRTIO_BALLOON_PAGES_PER_PAGE);
                         /* Sleep for at least 1/5 of a second before retry. */
                         msleep(200);
                         break;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6b1b7e184939..634f80bcdbd7 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
 
         /* We write the queue's selector into the notification register to
          * signal the other end */
-        writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+        writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
 }
 
 /* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
         struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
         struct virtio_mmio_vq_info *info = vq->priv;
         unsigned long flags, size;
-        unsigned int index = virtqueue_get_queue_index(vq);
+        unsigned int index = vq->index;
 
         spin_lock_irqsave(&vm_dev->lock, flags);
         list_del(&info->node);
@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
         int err;
         struct resource resources[2] = {};
         char *str;
-        long long int base;
+        long long int base, size;
+        unsigned int irq;
         int processed, consumed = 0;
         struct platform_device *pdev;
 
-        resources[0].flags = IORESOURCE_MEM;
-        resources[1].flags = IORESOURCE_IRQ;
-
-        resources[0].end = memparse(device, &str) - 1;
+        /* Consume "size" part of the command line parameter */
+        size = memparse(device, &str);
 
+        /* Get "@<base>:<irq>[:<id>]" chunks */
         processed = sscanf(str, "@%lli:%u%n:%d%n",
-                           &base, &resources[1].start, &consumed,
+                           &base, &irq, &consumed,
                            &vm_cmdline_id, &consumed);
 
-        if (processed < 2 || processed > 3 || str[consumed])
+        /*
+         * sscanf() must processes at least 2 chunks; also there
+         * must be no extra characters after the last chunk, so
+         * str[consumed] must be '\0'
+         */
+        if (processed < 2 || str[consumed])
                 return -EINVAL;
 
+        resources[0].flags = IORESOURCE_MEM;
         resources[0].start = base;
-        resources[0].end += base;
-        resources[1].end = resources[1].start;
+        resources[0].end = base + size - 1;
+
+        resources[1].flags = IORESOURCE_IRQ;
+        resources[1].start = resources[1].end = irq;
 
         if (!vm_cmdline_parent_registered) {
                 err = device_register(&vm_cmdline_parent);
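For readers of the vm_cmdline_set() rewrite above: the module parameter being parsed has the form <size>@<base>:<irq>[:<id>], where the size accepts the usual memparse() suffixes (K, M, G). The invocations below are illustrative only — the addresses and IRQ numbers are made up:

```c
/*
 * Example kernel command-line parameters (illustrative values):
 *
 *   virtio_mmio.device=0x100@0x100b0000:48
 *   virtio_mmio.device=1K@0x1001e000:74
 */
```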
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index c33aea36598a..e3ecc94591ad 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
 
         /* we write the queue's selector into the notification register to
          * signal the other end */
-        iowrite16(virtqueue_get_queue_index(vq),
-                  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
 /* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
         list_del(&info->node);
         spin_unlock_irqrestore(&vp_dev->lock, flags);
 
-        iowrite16(virtqueue_get_queue_index(vq),
-                  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
         if (vp_dev->msix_enabled) {
                 iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -830,16 +828,4 @@ static struct pci_driver virtio_pci_driver = {
 #endif
 };
 
-static int __init virtio_pci_init(void)
-{
-        return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
-        pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);
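The module_pci_driver() conversion above deletes hand-written boilerplate; the macro generates an equivalent init/exit pair. Roughly what module_pci_driver(virtio_pci_driver) expands to — a simplified sketch; the real macro derives the function names from its argument:

```c
/* Approximately what module_pci_driver(virtio_pci_driver) expands to. */
static int __init virtio_pci_driver_init(void)
{
        return pci_register_driver(&virtio_pci_driver);
}
module_init(virtio_pci_driver_init);

static void __exit virtio_pci_driver_exit(void)
{
        pci_unregister_driver(&virtio_pci_driver);
}
module_exit(virtio_pci_driver_exit);
```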
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index e639584b2dbd..ffd7e7da5d3b 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -93,8 +93,6 @@ struct vring_virtqueue | |||
93 | /* Host publishes avail event idx */ | 93 | /* Host publishes avail event idx */ |
94 | bool event; | 94 | bool event; |
95 | 95 | ||
96 | /* Number of free buffers */ | ||
97 | unsigned int num_free; | ||
98 | /* Head of free buffer list. */ | 96 | /* Head of free buffer list. */ |
99 | unsigned int free_head; | 97 | unsigned int free_head; |
100 | /* Number we've added since last sync. */ | 98 | /* Number we've added since last sync. */ |
@@ -106,9 +104,6 @@ struct vring_virtqueue | |||
106 | /* How to notify other side. FIXME: commonalize hcalls! */ | 104 | /* How to notify other side. FIXME: commonalize hcalls! */ |
107 | void (*notify)(struct virtqueue *vq); | 105 | void (*notify)(struct virtqueue *vq); |
108 | 106 | ||
109 | /* Index of the queue */ | ||
110 | int queue_index; | ||
111 | |||
112 | #ifdef DEBUG | 107 | #ifdef DEBUG |
113 | /* They're supposed to lock for us. */ | 108 | /* They're supposed to lock for us. */ |
114 | unsigned int in_use; | 109 | unsigned int in_use; |
@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq, | |||
135 | unsigned head; | 130 | unsigned head; |
136 | int i; | 131 | int i; |
137 | 132 | ||
133 | /* | ||
134 | * We require lowmem mappings for the descriptors because | ||
135 | * otherwise virt_to_phys will give us bogus addresses in the | ||
136 | * virtqueue. | ||
137 | */ | ||
138 | gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); | ||
139 | |||
138 | desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); | 140 | desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); |
139 | if (!desc) | 141 | if (!desc) |
140 | return -ENOMEM; | 142 | return -ENOMEM; |
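The new mask exists because of a line a little further down in this same function, where the indirect table's address is published to the host via virt_to_phys(); that translation is only valid for directly mapped (lowmem) pages, so a highmem allocation would hand the device a bogus address:

	/* Unchanged code later in vring_add_indirect(): the descriptor
	 * table's physical address goes straight into the ring. */
	vq->vring.desc[head].addr = virt_to_phys(desc);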
@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq, | |||
160 | desc[i-1].next = 0; | 162 | desc[i-1].next = 0; |
161 | 163 | ||
162 | /* We're about to use a buffer */ | 164 | /* We're about to use a buffer */ |
163 | vq->num_free--; | 165 | vq->vq.num_free--; |
164 | 166 | ||
165 | /* Use a single buffer which doesn't continue */ | 167 | /* Use a single buffer which doesn't continue */ |
166 | head = vq->free_head; | 168 | head = vq->free_head; |
@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq, | |||
174 | return head; | 176 | return head; |
175 | } | 177 | } |
176 | 178 | ||
177 | int virtqueue_get_queue_index(struct virtqueue *_vq) | ||
178 | { | ||
179 | struct vring_virtqueue *vq = to_vvq(_vq); | ||
180 | return vq->queue_index; | ||
181 | } | ||
182 | EXPORT_SYMBOL_GPL(virtqueue_get_queue_index); | ||
183 | |||
184 | /** | 179 | /** |
185 | * virtqueue_add_buf - expose buffer to other end | 180 | * virtqueue_add_buf - expose buffer to other end |
186 | * @vq: the struct virtqueue we're talking about. | 181 | * @vq: the struct virtqueue we're talking about. |
@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index); | |||
193 | * Caller must ensure we don't call this with other virtqueue operations | 188 | * Caller must ensure we don't call this with other virtqueue operations |
194 | * at the same time (except where noted). | 189 | * at the same time (except where noted). |
195 | * | 190 | * |
196 | * Returns remaining capacity of queue or a negative error | 191 | * Returns zero or a negative error (ie. ENOSPC, ENOMEM). |
197 | * (ie. ENOSPC). Note that it only really makes sense to treat all | ||
198 | * positive return values as "available": indirect buffers mean that | ||
199 | * we can put an entire sg[] array inside a single queue entry. | ||
200 | */ | 192 | */ |
201 | int virtqueue_add_buf(struct virtqueue *_vq, | 193 | int virtqueue_add_buf(struct virtqueue *_vq, |
202 | struct scatterlist sg[], | 194 | struct scatterlist sg[], |
@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, | |||
228 | 220 | ||
229 | /* If the host supports indirect descriptor tables, and we have multiple | 221 | /* If the host supports indirect descriptor tables, and we have multiple |
230 | * buffers, then go indirect. FIXME: tune this threshold */ | 222 | * buffers, then go indirect. FIXME: tune this threshold */ |
231 | if (vq->indirect && (out + in) > 1 && vq->num_free) { | 223 | if (vq->indirect && (out + in) > 1 && vq->vq.num_free) { |
232 | head = vring_add_indirect(vq, sg, out, in, gfp); | 224 | head = vring_add_indirect(vq, sg, out, in, gfp); |
233 | if (likely(head >= 0)) | 225 | if (likely(head >= 0)) |
234 | goto add_head; | 226 | goto add_head; |
@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq, | |||
237 | BUG_ON(out + in > vq->vring.num); | 229 | BUG_ON(out + in > vq->vring.num); |
238 | BUG_ON(out + in == 0); | 230 | BUG_ON(out + in == 0); |
239 | 231 | ||
240 | if (vq->num_free < out + in) { | 232 | if (vq->vq.num_free < out + in) { |
241 | pr_debug("Can't add buf len %i - avail = %i\n", | 233 | pr_debug("Can't add buf len %i - avail = %i\n", |
242 | out + in, vq->num_free); | 234 | out + in, vq->vq.num_free); |
243 | /* FIXME: for historical reasons, we force a notify here if | 235 | /* FIXME: for historical reasons, we force a notify here if |
244 | * there are outgoing parts to the buffer. Presumably the | 236 | * there are outgoing parts to the buffer. Presumably the |
245 | * host should service the ring ASAP. */ | 237 | * host should service the ring ASAP. */ |
@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, | |||
250 | } | 242 | } |
251 | 243 | ||
252 | /* We're about to use some buffers from the free list. */ | 244 | /* We're about to use some buffers from the free list. */ |
253 | vq->num_free -= out + in; | 245 | vq->vq.num_free -= out + in; |
254 | 246 | ||
255 | head = vq->free_head; | 247 | head = vq->free_head; |
256 | for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { | 248 | for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { |
@@ -296,7 +288,7 @@ add_head: | |||
296 | pr_debug("Added buffer head %i to %p\n", head, vq); | 288 | pr_debug("Added buffer head %i to %p\n", head, vq); |
297 | END_USE(vq); | 289 | END_USE(vq); |
298 | 290 | ||
299 | return vq->num_free; | 291 | return 0; |
300 | } | 292 | } |
301 | EXPORT_SYMBOL_GPL(virtqueue_add_buf); | 293 | EXPORT_SYMBOL_GPL(virtqueue_add_buf); |
302 | 294 | ||
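With the return value pinned to zero-or-negative, the ">= 0 means remaining capacity" arithmetic disappears from every caller (net, scsi, rpmsg, console and the tools, per the commit list above). A hedged sketch of the post-series caller pattern — sg, out, in and buf are placeholders, not code from this diff:

	err = virtqueue_add_buf(vq, sg, out, in, buf, GFP_ATOMIC);
	if (err < 0) {
		/* -ENOSPC: ring full; -ENOMEM: indirect allocation failed */
		return err;
	}
	virtqueue_kick(vq);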
@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) | |||
393 | 385 | ||
394 | while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { | 386 | while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { |
395 | i = vq->vring.desc[i].next; | 387 | i = vq->vring.desc[i].next; |
396 | vq->num_free++; | 388 | vq->vq.num_free++; |
397 | } | 389 | } |
398 | 390 | ||
399 | vq->vring.desc[i].next = vq->free_head; | 391 | vq->vring.desc[i].next = vq->free_head; |
400 | vq->free_head = head; | 392 | vq->free_head = head; |
401 | /* Plus final descriptor */ | 393 | /* Plus final descriptor */ |
402 | vq->num_free++; | 394 | vq->vq.num_free++; |
403 | } | 395 | } |
404 | 396 | ||
405 | static inline bool more_used(const struct vring_virtqueue *vq) | 397 | static inline bool more_used(const struct vring_virtqueue *vq) |
@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) | |||
599 | return buf; | 591 | return buf; |
600 | } | 592 | } |
601 | /* That should have freed everything. */ | 593 | /* That should have freed everything. */ |
602 | BUG_ON(vq->num_free != vq->vring.num); | 594 | BUG_ON(vq->vq.num_free != vq->vring.num); |
603 | 595 | ||
604 | END_USE(vq); | 596 | END_USE(vq); |
605 | return NULL; | 597 | return NULL; |
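The tightened BUG_ON still enforces the teardown contract: by the time a driver uses this to drain the queue, every buffer must come back, leaving num_free equal to the ring size. The usual driver-side loop looks like the sketch below, where free_buf() stands in for whatever driver-specific release function applies (virtio_console has one by that name):

	/* Drain buffers the host never consumed, before deleting the vq. */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		free_buf(buf);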
@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, | |||
653 | vq->vq.callback = callback; | 645 | vq->vq.callback = callback; |
654 | vq->vq.vdev = vdev; | 646 | vq->vq.vdev = vdev; |
655 | vq->vq.name = name; | 647 | vq->vq.name = name; |
648 | vq->vq.num_free = num; | ||
649 | vq->vq.index = index; | ||
656 | vq->notify = notify; | 650 | vq->notify = notify; |
657 | vq->weak_barriers = weak_barriers; | 651 | vq->weak_barriers = weak_barriers; |
658 | vq->broken = false; | 652 | vq->broken = false; |
659 | vq->last_used_idx = 0; | 653 | vq->last_used_idx = 0; |
660 | vq->num_added = 0; | 654 | vq->num_added = 0; |
661 | vq->queue_index = index; | ||
662 | list_add_tail(&vq->vq.list, &vdev->vqs); | 655 | list_add_tail(&vq->vq.list, &vdev->vqs); |
663 | #ifdef DEBUG | 656 | #ifdef DEBUG |
664 | vq->in_use = false; | 657 | vq->in_use = false; |
@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, | |||
673 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; | 666 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; |
674 | 667 | ||
675 | /* Put everything in free lists. */ | 668 | /* Put everything in free lists. */ |
676 | vq->num_free = num; | ||
677 | vq->free_head = 0; | 669 | vq->free_head = 0; |
678 | for (i = 0; i < num-1; i++) { | 670 | for (i = 0; i < num-1; i++) { |
679 | vq->vring.desc[i].next = i+1; | 671 | vq->vring.desc[i].next = i+1; |
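Initialization is now split across the two halves of the structure: num lands in the public vq->vq.num_free (set above, alongside the new index field), while the private side keeps chaining descriptors 0 through num-1 into the free list. For reference, a hedged reconstruction of the constructor's signature after this merge, with index being the parameter added by the queue_index patch in this series:

	struct virtqueue *vring_new_virtqueue(unsigned int index,
					      unsigned int num,
					      unsigned int vring_align,
					      struct virtio_device *vdev,
					      bool weak_barriers,
					      void *pages,
					      void (*notify)(struct virtqueue *),
					      void (*callback)(struct virtqueue *),
					      const char *name);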