Diffstat (limited to 'drivers/vhost')
-rw-r--r--	drivers/vhost/net.c       | 45
-rw-r--r--	drivers/vhost/tcm_vhost.c |  1
-rw-r--r--	drivers/vhost/vhost.c     | 53
-rw-r--r--	drivers/vhost/vhost.h     | 21
4 files changed, 56 insertions(+), 64 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f80ae5fc9b00..532fc8830c42 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -126,6 +126,42 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
 	net->tx_poll_state = VHOST_NET_POLL_STARTED;
 }
 
+/* In case DMA completions from the lower device driver arrive out of order:
+ * upend_idx tracks the end of the used idx, done_idx tracks its head.  Once
+ * the lower device has completed DMA contiguously, we signal the KVM
+ * guest's used idx.
+ */
+static int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
+{
+	int i;
+	int j = 0;
+
+	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
+			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+			vhost_add_used_and_signal(vq->dev, vq,
+						  vq->heads[i].id, 0);
+			++j;
+		} else
+			break;
+	}
+	if (j)
+		vq->done_idx = i;
+	return j;
+}
+
+static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
+{
+	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
+	struct vhost_virtqueue *vq = ubufs->vq;
+
+	vhost_poll_queue(&vq->poll);
+	/* set len to mark this desc's buffers as having completed DMA */
+	vq->heads[ubuf->desc].len = success ?
+		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+	vhost_ubuf_put(ubufs);
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-side critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
@@ -594,9 +630,18 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	struct vhost_net *n = f->private_data;
 	struct socket *tx_sock;
 	struct socket *rx_sock;
+	int i;
 
 	vhost_net_stop(n, &tx_sock, &rx_sock);
 	vhost_net_flush(n);
+	vhost_dev_stop(&n->dev);
+	for (i = 0; i < n->dev.nvqs; ++i) {
+		/* Wait for all lower device DMAs done. */
+		if (n->dev.vqs[i].ubufs)
+			vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
+
+		vhost_zerocopy_signal_used(&n->dev.vqs[i]);
+	}
 	vhost_dev_cleanup(&n->dev, false);
 	if (tx_sock)
 		fput(tx_sock->file);
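
The scan moved into net.c above is the heart of the zerocopy bookkeeping: completions from the lower device may arrive out of order, but the guest is only told about the contiguous prefix of used entries starting at done_idx. A minimal userspace sketch of the same scan (RING, DMA_DONE, DMA_CLEAR and the main() harness are illustrative stand-ins, not kernel identifiers):

#include <stdio.h>

#define RING		8	/* stands in for UIO_MAXIOV */
#define DMA_DONE	2	/* stands in for VHOST_DMA_DONE_LEN */
#define DMA_CLEAR	0	/* stands in for VHOST_DMA_CLEAR_LEN */

static int len[RING];		/* stands in for vq->heads[i].len */
static int done_idx, upend_idx;

/* Same shape as vhost_zerocopy_signal_used(): recycle the contiguous
 * prefix of completed buffers starting at done_idx; stop at the first
 * buffer whose DMA is still in flight. */
static int signal_used(void)
{
	int i, j = 0;

	for (i = done_idx; i != upend_idx; i = (i + 1) % RING) {
		if (len[i] < DMA_DONE)
			break;			/* head still in flight */
		len[i] = DMA_CLEAR;		/* "add used" and recycle */
		++j;
	}
	if (j)
		done_idx = i;
	return j;
}

int main(void)
{
	upend_idx = 3;			/* three buffers handed to the NIC */
	len[1] = DMA_DONE;		/* middle buffer completes first */
	printf("signalled %d\n", signal_used());	/* 0: head not done */
	len[0] = len[2] = DMA_DONE;
	printf("signalled %d\n", signal_used());	/* 3: prefix contiguous */
	return 0;
}

Buffer 1 finishing before buffer 0 produces no signal; once buffer 0 lands, all three entries are flushed to the guest in one pass.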
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index aa31692064dd..23c138fdc195 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -895,6 +895,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 		vhost_scsi_clear_endpoint(s, &backend);
 	}
 
+	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
 	kfree(s);
 	return 0;
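
With vhost_dev_stop() factored out, both release paths follow the same teardown order. A stub sketch of that contract, assuming nothing beyond what the hunks above show (every function here is an illustrative placeholder, not a kernel symbol):

#include <stdio.h>

static void backend_stop(void)    { printf("detach backends\n"); }
static void work_flush(void)      { printf("drain queued vhost work\n"); }
static void dev_stop(void)        { printf("stop + flush kick polling\n"); }
static void wait_for_dma(void)    { printf("wait for in-flight zerocopy DMA\n"); }
static void dev_cleanup(void)     { printf("free memory table, fds, worker\n"); }

int main(void)
{
	backend_stop();		/* vhost_net_stop() / vhost_scsi_clear_endpoint() */
	work_flush();		/* vhost_net_flush() */
	dev_stop();		/* new generic vhost_dev_stop(), both backends */
	wait_for_dma();		/* net only: vhost_ubuf_put_and_wait() */
	dev_cleanup();		/* safe: no worker can touch freed state now */
	return 0;
}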
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5affce325031..ef8f5988f855 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -26,10 +26,6 @@
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 
-#include <linux/net.h>
-#include <linux/if_packet.h>
-#include <linux/if_arp.h>
-
 #include "vhost.h"
 
 enum {
@@ -414,28 +410,16 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
 	return 0;
 }
 
-/* In case of DMA done not in order in lower device driver for some reason.
- * upend_idx is used to track end of used idx, done_idx is used to track head
- * of used idx. Once lower device DMA done contiguously, we will signal KVM
- * guest used idx.
- */
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
+void vhost_dev_stop(struct vhost_dev *dev)
 {
 	int i;
-	int j = 0;
-
-	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
-		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
-			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
-			vhost_add_used_and_signal(vq->dev, vq,
-						  vq->heads[i].id, 0);
-			++j;
-		} else
-			break;
+
+	for (i = 0; i < dev->nvqs; ++i) {
+		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
+			vhost_poll_stop(&dev->vqs[i].poll);
+			vhost_poll_flush(&dev->vqs[i].poll);
+		}
 	}
-	if (j)
-		vq->done_idx = i;
-	return j;
 }
 
 /* Caller should have device mutex if and only if locked is set */
@@ -444,17 +428,6 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
-			vhost_poll_stop(&dev->vqs[i].poll);
-			vhost_poll_flush(&dev->vqs[i].poll);
-		}
-		/* Wait for all lower device DMAs done. */
-		if (dev->vqs[i].ubufs)
-			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
-
-		/* Signal guest as appropriate. */
-		vhost_zerocopy_signal_used(&dev->vqs[i]);
-
 		if (dev->vqs[i].error_ctx)
 			eventfd_ctx_put(dev->vqs[i].error_ctx);
 		if (dev->vqs[i].error)
@@ -1599,15 +1572,3 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
 	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
 	kfree(ubufs);
 }
-
-void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool status)
-{
-	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
-	struct vhost_virtqueue *vq = ubufs->vq;
-
-	vhost_poll_queue(&vq->poll);
-	/* set len to mark this desc buffers done DMA */
-	vq->heads[ubuf->desc].len = status ?
-		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
-	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
-}
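
The put-and-wait pattern visible in the context lines above is a plain reference count with a wakeup at zero: each in-flight zerocopy buffer holds a reference, the completion callback drops it, and release drops its own reference and sleeps until the count reaches zero. A pthreads approximation, assuming a single in-flight buffer (kref/wait_event replaced here by a mutex, a condvar and an int):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  zero = PTHREAD_COND_INITIALIZER;
static int refcount = 1;	/* creator's reference, as in vhost_ubuf_alloc() */

/* kref_put() + vhost_zerocopy_done_signal(), simplified */
static void ubuf_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0)
		pthread_cond_broadcast(&zero);
	pthread_mutex_unlock(&lock);
}

static void *dma_completes(void *arg)
{
	ubuf_put();			/* the zerocopy callback fires */
	return NULL;
}

int main(void)
{
	pthread_t nic;

	pthread_mutex_lock(&lock);
	++refcount;			/* one buffer handed to the device */
	pthread_mutex_unlock(&lock);
	pthread_create(&nic, NULL, dma_completes, NULL);

	/* vhost_ubuf_put_and_wait(): drop our ref, sleep until count hits 0 */
	ubuf_put();
	pthread_mutex_lock(&lock);
	while (refcount != 0)
		pthread_cond_wait(&zero, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(nic, NULL);
	printf("all zerocopy buffers completed; safe to free\n");
	return 0;
}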
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 464469d901d5..5e19e3d5db8c 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -7,27 +7,11 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/file.h>
-#include <linux/skbuff.h>
 #include <linux/uio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
 
-/*
- * For transmit, used buffer len is unused; we override it to track buffer
- * status internally; used for zerocopy tx only.
- */
-/* Lower device DMA failed */
-#define VHOST_DMA_FAILED_LEN	3
-/* Lower device DMA done */
-#define VHOST_DMA_DONE_LEN	2
-/* Lower device DMA in progress */
-#define VHOST_DMA_IN_PROGRESS	1
-/* Buffer unused */
-#define VHOST_DMA_CLEAR_LEN	0
-
-#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
-
 struct vhost_device;
 
 struct vhost_work;
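
The VHOST_DMA_* values dropped from vhost.h above overload the otherwise unused "len" field of a tx head as a status word, and they travel with the zerocopy code that now lives on the net.c side. One detail worth noting: VHOST_DMA_IS_DONE() tests ">= DONE", so a failed DMA also counts as done and gets recycled by the scan in net.c. A standalone check of exactly the removed definitions:

#include <assert.h>

#define VHOST_DMA_FAILED_LEN	3
#define VHOST_DMA_DONE_LEN	2
#define VHOST_DMA_IN_PROGRESS	1
#define VHOST_DMA_CLEAR_LEN	0
#define VHOST_DMA_IS_DONE(len)	((len) >= VHOST_DMA_DONE_LEN)

int main(void)
{
	assert(VHOST_DMA_IS_DONE(VHOST_DMA_FAILED_LEN));	/* failure still completes */
	assert(VHOST_DMA_IS_DONE(VHOST_DMA_DONE_LEN));
	assert(!VHOST_DMA_IS_DONE(VHOST_DMA_IN_PROGRESS));
	assert(!VHOST_DMA_IS_DONE(VHOST_DMA_CLEAR_LEN));
	return 0;
}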
@@ -80,6 +64,8 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
 void vhost_ubuf_put(struct vhost_ubuf_ref *);
 void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
 
+struct ubuf_info;
+
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
@@ -177,6 +163,7 @@ long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
 long vhost_dev_check_owner(struct vhost_dev *);
 long vhost_dev_reset_owner(struct vhost_dev *);
 void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
 int vhost_vq_access_ok(struct vhost_virtqueue *vq);
 int vhost_log_access_ok(struct vhost_dev *);
@@ -201,8 +188,6 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		    unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(struct ubuf_info *, bool);
-int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
 
 #define vq_err(vq, fmt, ...) do { \
 	pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \