author     Michael S. Tsirkin <mst@redhat.com>   2013-04-28 08:51:40 -0400
committer  Michael S. Tsirkin <mst@redhat.com>   2013-05-01 03:02:53 -0400
commit     81f95a55802be669b3191b2828c34006d0f04214 (patch)
tree       a13e1b13c962f738c53d15411d9b4ab1584dfdae /drivers
parent     3dfbff328f0491b7049673cf7fd568d26a14fc4d (diff)
vhost: move per-vq net specific fields out to net
This will remove the need for vhost scsi to pull
in virtio-net.h.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
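
The fields being moved are purely net-specific: the scratch iovec used to park the virtio-net header, plus the two header lengths. After this patch the generic struct vhost_virtqueue in vhost.h no longer references virtio_net_hdr_mrg_rxbuf, so vhost-scsi can include vhost.h without dragging in virtio-net.h. A condensed sketch of the resulting layout, assembled from the hunks below (unrelated fields elided):

```c
/* Sketch assembled from the hunks below; unrelated fields elided. */

/* drivers/vhost/vhost.h: generic vq, no virtio-net types left */
struct vhost_virtqueue {
        /* ... ring state, log, private_data, ... */
        struct iovec *indirect;
        struct vring_used_elem *heads;
        /* ... */
};

/* drivers/vhost/net.c: net-specific wrapper embedding the generic vq */
struct vhost_net_virtqueue {
        struct vhost_virtqueue vq;
        /* hdr is used to store the virtio header.
         * Since each iovec has >= 1 byte length, we never need more than
         * header length entries to store the header. */
        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        size_t vhost_hlen;
        size_t sock_hlen;
        /* zerocopy bookkeeping (upend_idx, done_idx, ubuf_info, ubufs) ... */
};
```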
Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/vhost/net.c   | 43
 -rw-r--r--  drivers/vhost/vhost.c |  2
 -rw-r--r--  drivers/vhost/vhost.h |  3
 3 files changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8672e0538d59..e34e195b9cf6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -72,6 +72,12 @@ struct vhost_ubuf_ref {
 
 struct vhost_net_virtqueue {
         struct vhost_virtqueue vq;
+        /* hdr is used to store the virtio header.
+         * Since each iovec has >= 1 byte length, we never need more than
+         * header length entries to store the header. */
+        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+        size_t vhost_hlen;
+        size_t sock_hlen;
         /* vhost zerocopy support fields below: */
         /* last used idx for outstanding DMA zerocopy buffers */
         int upend_idx;
@@ -166,7 +172,7 @@ err:
         return -ENOMEM;
 }
 
-void vhost_net_reset_ubuf_info(struct vhost_net *n)
+void vhost_net_vq_reset(struct vhost_net *n)
 {
         int i;
 
@@ -176,6 +182,8 @@ void vhost_net_reset_ubuf_info(struct vhost_net *n)
                 n->vqs[i].ubufs = NULL;
                 kfree(n->vqs[i].ubuf_info);
                 n->vqs[i].ubuf_info = NULL;
+                n->vqs[i].vhost_hlen = 0;
+                n->vqs[i].sock_hlen = 0;
         }
 
 }
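
Taken together with the rename in the previous hunk, the per-vq reset helper now clears the header lengths as well. Below is a sketch of how the function reads after this patch; the loop bound (VHOST_NET_VQ_MAX) is assumed from the rest of the file, and anything above the visible context is elided:

```c
/* Sketch of vhost_net_vq_reset() after this patch; only the lines visible
 * in the hunks above are shown, the loop bound is assumed from the file. */
void vhost_net_vq_reset(struct vhost_net *n)
{
        int i;

        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                /* ... zerocopy index resets outside the visible context ... */
                n->vqs[i].ubufs = NULL;
                kfree(n->vqs[i].ubuf_info);
                n->vqs[i].ubuf_info = NULL;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
}
```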
@@ -302,8 +310,8 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
 {
-        struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_TX].vq;
         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+        struct vhost_virtqueue *vq = &nvq->vq;
         unsigned out, in, s;
         int head;
         struct msghdr msg = {
@@ -329,7 +337,7 @@ static void handle_tx(struct vhost_net *net)
         mutex_lock(&vq->mutex);
         vhost_disable_notify(&net->dev, vq);
 
-        hdr_size = vq->vhost_hlen;
+        hdr_size = nvq->vhost_hlen;
         zcopy = nvq->ubufs;
 
         for (;;) {
@@ -369,14 +377,14 @@ static void handle_tx(struct vhost_net *net)
                         break;
                 }
                 /* Skip header. TODO: support TSO. */
-                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
+                s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
                 msg.msg_iovlen = out;
                 len = iov_length(vq->iov, out);
                 /* Sanity check */
                 if (!len) {
                         vq_err(vq, "Unexpected header len for TX: "
                                "%zd expected %zd\n",
-                               iov_length(vq->hdr, s), hdr_size);
+                               iov_length(nvq->hdr, s), hdr_size);
                         break;
                 }
                 zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
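
handle_tx() strips hdr_size bytes of virtio-net header off the front of the guest's buffers before handing the payload to the socket, parking those bytes in the per-vq hdr scratch array; the returned segment count s is what the sanity check above feeds to iov_length(). The helper itself sits outside this hunk; a simplified sketch of the idea (not the kernel's exact implementation) follows:

```c
#include <stddef.h>
#include <sys/uio.h>

/* Simplified sketch of what a helper like move_iovec_hdr() does: peel the
 * first `len` bytes off `from` into `to`, consuming them from `from`, and
 * return how many iovec segments were used. Not the kernel's exact code. */
static size_t sketch_move_iovec_hdr(struct iovec *from, struct iovec *to,
                                    size_t len, int iov_count)
{
        size_t seg = 0;

        while (len && seg < (size_t)iov_count) {
                size_t size = from->iov_len < len ? from->iov_len : len;

                to->iov_base = from->iov_base;
                to->iov_len = size;

                /* consume the header bytes from the source iovec */
                from->iov_base = (char *)from->iov_base + size;
                from->iov_len -= size;

                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
```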
@@ -523,7 +531,8 @@ err:
  * read-size critical section for our kind of RCU. */
 static void handle_rx(struct vhost_net *net)
 {
-        struct vhost_virtqueue *vq = &net->vqs[VHOST_NET_VQ_RX].vq;
+        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
+        struct vhost_virtqueue *vq = &nvq->vq;
         unsigned uninitialized_var(in), log;
         struct vhost_log *vq_log;
         struct msghdr msg = {
@@ -551,8 +560,8 @@ static void handle_rx(struct vhost_net *net)
 
         mutex_lock(&vq->mutex);
         vhost_disable_notify(&net->dev, vq);
-        vhost_hlen = vq->vhost_hlen;
-        sock_hlen = vq->sock_hlen;
+        vhost_hlen = nvq->vhost_hlen;
+        sock_hlen = nvq->sock_hlen;
 
         vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                 vq->log : NULL;
@@ -582,11 +591,11 @@ static void handle_rx(struct vhost_net *net)
                 /* We don't need to be notified again. */
                 if (unlikely((vhost_hlen)))
                         /* Skip header. TODO: support TSO. */
-                        move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
+                        move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
                 else
                         /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
                          * needed because recvmsg can modify msg_iov. */
-                        copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
+                        copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
                 msg.msg_iovlen = in;
                 err = sock->ops->recvmsg(NULL, sock, &msg,
                                          sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -600,7 +609,7 @@ static void handle_rx(struct vhost_net *net)
                         continue;
                 }
                 if (unlikely(vhost_hlen) &&
-                    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
+                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
                                       vhost_hlen)) {
                         vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                                vq->iov->iov_base);
@@ -608,7 +617,7 @@ static void handle_rx(struct vhost_net *net)
                 }
                 /* TODO: Should check and handle checksum. */
                 if (likely(mergeable) &&
-                    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
+                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
                                       offsetof(typeof(hdr), num_buffers),
                                       sizeof hdr.num_buffers)) {
                         vq_err(vq, "Failed num_buffers write");
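
On the receive path the number of merged buffers is only known after recvmsg() returns, which is why the header was copied aside into nvq->hdr: the code can then patch num_buffers in place at its offset inside the mergeable header. For reference, the layout being indexed by offsetof() is the standard virtio-net one; the snippet below uses a local illustrative copy of it rather than the UAPI header:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative copy of the virtio-net header layouts; the real definitions
 * live in the virtio-net UAPI header. */
struct virtio_net_hdr {
        uint8_t  flags;
        uint8_t  gso_type;
        uint16_t hdr_len;
        uint16_t gso_size;
        uint16_t csum_start;
        uint16_t csum_offset;
};

struct virtio_net_hdr_mrg_rxbuf {
        struct virtio_net_hdr hdr;
        uint16_t num_buffers;   /* patched after recvmsg() knows headcount */
};

int main(void)
{
        /* the offset handle_rx() writes headcount to in the hunk above */
        printf("num_buffers offset = %zu\n",
               offsetof(struct virtio_net_hdr_mrg_rxbuf, num_buffers));
        return 0;
}
```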
@@ -686,6 +695,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                 n->vqs[i].ubuf_info = NULL;
                 n->vqs[i].upend_idx = 0;
                 n->vqs[i].done_idx = 0;
+                n->vqs[i].vhost_hlen = 0;
+                n->vqs[i].sock_hlen = 0;
         }
         r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
         if (r < 0) {
@@ -783,7 +794,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         vhost_net_flush(n);
         vhost_dev_stop(&n->dev);
         vhost_dev_cleanup(&n->dev, false);
-        vhost_net_reset_ubuf_info(n);
+        vhost_net_vq_reset(n);
         if (tx_sock)
                 fput(tx_sock->file);
         if (rx_sock)
@@ -964,7 +975,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
         vhost_net_stop(n, &tx_sock, &rx_sock);
         vhost_net_flush(n);
         err = vhost_dev_reset_owner(&n->dev);
-        vhost_net_reset_ubuf_info(n);
+        vhost_net_vq_reset(n);
 done:
         mutex_unlock(&n->dev.mutex);
         if (tx_sock)
@@ -1001,8 +1012,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
         smp_wmb();
         for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                 mutex_lock(&n->vqs[i].vq.mutex);
-                n->vqs[i].vq.vhost_hlen = vhost_hlen;
-                n->vqs[i].vq.sock_hlen = sock_hlen;
+                n->vqs[i].vhost_hlen = vhost_hlen;
+                n->vqs[i].sock_hlen = sock_hlen;
                 mutex_unlock(&n->vqs[i].vq.mutex);
         }
         vhost_net_flush(n);
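
The last net.c hunk is where the header lengths are actually stored: vhost_net_set_features() sets one pair per virtqueue, and they now live in the wrapper rather than in the generic vq. The derivation of the two lengths happens earlier in the same function, outside this hunk; the sketch below reconstructs it from how vhost-net of this era behaves and is an assumption, not part of the patch:

```c
/* Assumed sketch, not part of this patch: how vhost_hlen/sock_hlen are
 * typically derived from the negotiated features before being stored. */
static void sketch_hdr_lens(u64 features, size_t *vhost_hlen, size_t *sock_hlen)
{
        size_t hdr_len = (features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ?
                         sizeof(struct virtio_net_hdr_mrg_rxbuf) :
                         sizeof(struct virtio_net_hdr);

        if (features & (1ULL << VHOST_NET_F_VIRTIO_NET_HDR)) {
                /* vhost itself builds/strips the vnet header */
                *vhost_hlen = hdr_len;
                *sock_hlen = 0;
        } else {
                /* the tap/macvtap socket already carries the header */
                *vhost_hlen = 0;
                *sock_hlen = hdr_len;
        }
}
```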
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 6644812e99b4..6dcd81c87432 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -179,8 +179,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
         vq->used_flags = 0;
         vq->log_used = false;
         vq->log_addr = -1ull;
-        vq->vhost_hlen = 0;
-        vq->sock_hlen = 0;
         vq->private_data = NULL;
         vq->log_base = NULL;
         vq->error_ctx = NULL;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 3a36712e0792..1627eec0ca25 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -102,10 +102,7 @@ struct vhost_virtqueue {
         /* hdr is used to store the virtio header.
          * Since each iovec has >= 1 byte length, we never need more than
          * header length entries to store the header. */
-        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
         struct iovec *indirect;
-        size_t vhost_hlen;
-        size_t sock_hlen;
         struct vring_used_elem *heads;
         /* We use a kind of RCU to access private pointer.
          * All readers access it from worker, which makes it possible to
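
With the net-only fields gone from struct vhost_virtqueue, any code that starts from a plain vq pointer has to reach its containing wrapper to get at hdr or the header lengths. This patch simply keeps the vhost_net_virtqueue pointer (nvq) in hand in net.c; the embedding also permits the usual container_of() recovery, shown here purely as an illustration of the relationship, not as something this patch adds:

```c
/* Illustration only (not added by this patch): recovering the net wrapper
 * from the embedded generic virtqueue. */
static inline struct vhost_net_virtqueue *to_net_vq(struct vhost_virtqueue *vq)
{
        return container_of(vq, struct vhost_net_virtqueue, vq);
}
```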