about summary refs log tree commit diff stats
path: root/drivers/vhost/net.c
diff options
context:
space:
mode:
authorMichael S. Tsirkin <mst@redhat.com>2012-11-01 05:16:46 -0400
committerDavid S. Miller <davem@davemloft.net>2012-11-02 21:29:58 -0400
commitb211616d712551874db3ce0fb44196f6faad2c34 (patch)
tree3945dbbf5d533828e0e31396dfb3ae582ffb7fff /drivers/vhost/net.c
parentc4fcb586c337f8b8de3a3d7ba8514eed03695f6e (diff)
vhost: move -net specific code out
Zerocopy handling code is vhost-net specific. Move it from vhost.c/vhost.h out to net.c. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--drivers/vhost/net.c45
1 file changed, 45 insertions, 0 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f80ae5fc9b00..532fc8830c42 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -126,6 +126,42 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
126 net->tx_poll_state = VHOST_NET_POLL_STARTED; 126 net->tx_poll_state = VHOST_NET_POLL_STARTED;
127} 127}
128 128
129/* In case of DMA done not in order in lower device driver for some reason.
130 * upend_idx is used to track end of used idx, done_idx is used to track head
131 * of used idx. Once lower device DMA done contiguously, we will signal KVM
132 * guest used idx.
133 */
134int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
135{
136 int i;
137 int j = 0;
138
139 for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
140 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
141 vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
142 vhost_add_used_and_signal(vq->dev, vq,
143 vq->heads[i].id, 0);
144 ++j;
145 } else
146 break;
147 }
148 if (j)
149 vq->done_idx = i;
150 return j;
151}
152
153static void vhost_zerocopy_callback(struct ubuf_info *ubuf, int status)
154{
155 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
156 struct vhost_virtqueue *vq = ubufs->vq;
157
158 vhost_poll_queue(&vq->poll);
159 /* set len to mark this desc buffers done DMA */
160 vq->heads[ubuf->desc].len = status ?
161 VHOST_DMA_FAILED_LEN : VHOST_DMA_DONE_LEN;
162 vhost_ubuf_put(ubufs);
163}
164
129/* Expects to be always run from workqueue - which acts as 165/* Expects to be always run from workqueue - which acts as
130 * read-size critical section for our kind of RCU. */ 166 * read-size critical section for our kind of RCU. */
131static void handle_tx(struct vhost_net *net) 167static void handle_tx(struct vhost_net *net)
@@ -594,9 +630,18 @@ static int vhost_net_release(struct inode *inode, struct file *f)
594 struct vhost_net *n = f->private_data; 630 struct vhost_net *n = f->private_data;
595 struct socket *tx_sock; 631 struct socket *tx_sock;
596 struct socket *rx_sock; 632 struct socket *rx_sock;
633 int i;
597 634
598 vhost_net_stop(n, &tx_sock, &rx_sock); 635 vhost_net_stop(n, &tx_sock, &rx_sock);
599 vhost_net_flush(n); 636 vhost_net_flush(n);
637 vhost_dev_stop(&n->dev);
638 for (i = 0; i < n->dev.nvqs; ++i) {
639 /* Wait for all lower device DMAs done. */
640 if (n->dev.vqs[i].ubufs)
641 vhost_ubuf_put_and_wait(n->dev.vqs[i].ubufs);
642
643 vhost_zerocopy_signal_used(n, &n->dev.vqs[i]);
644 }
600 vhost_dev_cleanup(&n->dev, false); 645 vhost_dev_cleanup(&n->dev, false);
601 if (tx_sock) 646 if (tx_sock)
602 fput(tx_sock->file); 647 fput(tx_sock->file);