author:    Asias He <asias@redhat.com>             2013-04-26 23:16:48 -0400
committer: Michael S. Tsirkin <mst@redhat.com>     2013-05-01 03:02:45 -0400
commit:    3ab2e420ec1caf4ead233f3161ac7d86fe5d2a9f
tree:      6c6237f7bbad368dfbdae34895430280af0d19b0
parent:    bc7562355fda8075793bf66094cda573206ec693
vhost: Allow device specific fields per vq
This is useful for any device that wants device specific fields per vq.
For example, tcm_vhost wants a per vq field to track requests which are
in flight on the vq. Also, on top of this we can add patches to move
things like ubufs from vhost.h out to net.c.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
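For readers new to the idiom: the patch embeds the generic struct
vhost_virtqueue inside a per-device wrapper struct and recovers the
wrapper with container_of(). A minimal user-space sketch of that
pattern, under the assumption that the generic fields are elided (the
to_nvq() helper is illustrative, not part of the patch):

	#include <stddef.h>

	/* The kernel's container_of(), reduced to its offsetof() core:
	 * step back from the embedded member to the enclosing struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct vhost_virtqueue { int dummy; /* generic fields elided */ };

	/* Device specific wrapper, as introduced by this patch. */
	struct vhost_net_virtqueue {
		struct vhost_virtqueue vq;
		/* device specific per-vq fields can now be added here */
	};

	/* Recover the wrapper from a pointer to the embedded vq. */
	static struct vhost_net_virtqueue *to_nvq(struct vhost_virtqueue *vq)
	{
		return container_of(vq, struct vhost_net_virtqueue, vq);
	}

This is also why vhost_net_enable_vq()/vhost_net_disable_vq() below
convert to the wrapper before indexing n->poll: the pointer subtraction
(nvq - n->vqs) must be done on the wrapper array, whose element stride
differs from that of the embedded struct vhost_virtqueue.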
Diffstat (limited to 'drivers/vhost/net.c')
 drivers/vhost/net.c | 64 +++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 87c216c1e54e..176aa030dc5f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -64,9 +64,13 @@ enum {
 	VHOST_NET_VQ_MAX = 2,
 };
 
+struct vhost_net_virtqueue {
+	struct vhost_virtqueue vq;
+};
+
 struct vhost_net {
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
 	/* Number of TX recently submitted.
 	 * Protected by tx vq lock. */
@@ -198,7 +202,7 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 	 * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq;
 	unsigned out, in, s;
 	int head;
 	struct msghdr msg = {
@@ -417,7 +421,7 @@ err:
 	 * read-size critical section for our kind of RCU. */
 static void handle_rx(struct vhost_net *net)
 {
-	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+	struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_RX].vq;
 	unsigned uninitialized_var(in), log;
 	struct vhost_log *vq_log;
 	struct msghdr msg = {
@@ -559,17 +563,26 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;
 
 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
 
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
+	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
+	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
+	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
+	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
+		kfree(vqs);
 		return r;
 	}
 
@@ -584,7 +597,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 static void vhost_net_disable_vq(struct vhost_net *n,
 				 struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	if (!vq->private_data)
 		return;
 	vhost_poll_stop(poll);
@@ -593,7 +608,9 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
-	struct vhost_poll *poll = n->poll + (vq - n->vqs);
+	struct vhost_net_virtqueue *nvq =
+		container_of(vq, struct vhost_net_virtqueue, vq);
+	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
 	sock = rcu_dereference_protected(vq->private_data,
@@ -621,30 +638,30 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 			   struct socket **rx_sock)
 {
-	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
-	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
+	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
 static void vhost_net_flush_vq(struct vhost_net *n, int index)
 {
 	vhost_poll_flush(n->poll + index);
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].vq.poll);
 }
 
 static void vhost_net_flush(struct vhost_net *n)
 {
 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
-	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+	if (n->vqs[VHOST_NET_VQ_TX].vq.ubufs) {
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = true;
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		/* Wait for all lower device DMAs done. */
-		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
-		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		vhost_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].vq.ubufs);
+		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = false;
-		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
-		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
+		kref_init(&n->vqs[VHOST_NET_VQ_TX].vq.ubufs->kref);
+		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 	}
 }
 
@@ -665,6 +682,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	/* We do an extra flush before freeing memory,
 	 * since jobs can re-queue themselves. */
 	vhost_net_flush(n);
+	kfree(n->dev.vqs);
 	kfree(n);
 	return 0;
 }
@@ -750,7 +768,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		r = -ENOBUFS;
 		goto err;
 	}
-	vq = n->vqs + index;
+	vq = &n->vqs[index].vq;
 	mutex_lock(&vq->mutex);
 
 	/* Verify that ring has been setup correctly. */
@@ -870,10 +888,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 	n->dev.acked_features = features;
 	smp_wmb();
 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-		mutex_lock(&n->vqs[i].mutex);
-		n->vqs[i].vhost_hlen = vhost_hlen;
-		n->vqs[i].sock_hlen = sock_hlen;
-		mutex_unlock(&n->vqs[i].mutex);
+		mutex_lock(&n->vqs[i].vq.mutex);
+		n->vqs[i].vq.vhost_hlen = vhost_hlen;
+		n->vqs[i].vq.sock_hlen = sock_hlen;
+		mutex_unlock(&n->vqs[i].vq.mutex);
 	}
 	vhost_net_flush(n);
 	mutex_unlock(&n->dev.mutex);
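
As motivation, the commit message says tcm_vhost wants a per vq field
to track requests in flight. A hedged sketch of what the wrapper makes
possible (the struct name, inflight field, and kick handler below are
hypothetical illustrations, not code from this series):

	/* Hypothetical follow-up user of this patch: a SCSI-style wrapper
	 * carrying a per-vq count of requests in flight, in the spirit of
	 * what the commit message describes for tcm_vhost. */
	struct vhost_scsi_virtqueue {
		struct vhost_virtqueue vq;
		int inflight;		/* requests in flight on this vq */
	};

	static void vhost_scsi_handle_kick(struct vhost_virtqueue *vq)
	{
		struct vhost_scsi_virtqueue *svq =
			container_of(vq, struct vhost_scsi_virtqueue, vq);

		svq->inflight++;	/* per-vq state, no device-wide lookup */
	}

Because the generic layer only ever hands back the embedded vq pointer
(as in handle_kick above), each device can grow its wrapper freely
without touching vhost.h, which is exactly the point of the patch.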