path: root/drivers/vhost
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-23 17:38:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-23 17:38:20 -0400
commit    a030cbc35091630261194745389cace7706abafa (patch)
tree      4b8fe8bea87403e6878db3d1c6e0212def97f2ec /drivers/vhost
parent    55c62960b0a7f378a9ab5237a2f9cf02dfe95a36 (diff)
parent    22fa90c7fb479694d6affebc049d21f06b714be6 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull vhost fixes from Michael Tsirkin:
 "vhost: more fixes for 3.11

  This includes some fixes for the vhost net and scsi drivers.

  The test module has already been reworked to avoid RCU usage, but the
  necessary core changes were missing; we fixed this.

  Unlikely to affect any real-world users, but it's early in the cycle,
  so let's merge them"

(It was earlier when Michael originally sent the email, but it somehow
got missed in the flood, so here it is after -rc2)

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: Remove custom vhost rcu usage
  vhost-scsi: Always access vq->private_data under vq mutex
  vhost-net: Always access vq->private_data under vq mutex
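The common thread in all three patches: the vhost worker no longer treats vq->private_data as an RCU-protected pointer (with the vhost_work flush standing in for synchronize_rcu); instead both readers and writers touch it only while holding vq->mutex. A minimal before/after sketch of that pattern, distilled from the hunks below (handle_vq_old/handle_vq_new are illustrative names, not functions in the driver):

/* Before: private_data was annotated __rcu and read via
 * rcu_dereference_check(), with the vhost_work flush acting as the
 * grace period instead of synchronize_rcu().
 */
static void handle_vq_old(struct vhost_virtqueue *vq)
{
        void *priv = rcu_dereference_check(vq->private_data, 1);

        if (!priv)
                return;

        mutex_lock(&vq->mutex);
        /* ... process the virtqueue ... */
        mutex_unlock(&vq->mutex);
}

/* After: take the mutex first, then read private_data as a plain
 * pointer; bail out through a label so the unlock is never skipped.
 */
static void handle_vq_new(struct vhost_virtqueue *vq)
{
        void *priv;

        mutex_lock(&vq->mutex);
        priv = vq->private_data;
        if (!priv)
                goto out;

        /* ... process the virtqueue ... */
out:
        mutex_unlock(&vq->mutex);
}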
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c    37
-rw-r--r--  drivers/vhost/scsi.c   17
-rw-r--r--  drivers/vhost/test.c    6
-rw-r--r--  drivers/vhost/vhost.h  10
4 files changed, 26 insertions, 44 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 027be91db139..969a85960e9f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -15,7 +15,6 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -346,12 +345,11 @@ static void handle_tx(struct vhost_net *net)
 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
 	bool zcopy, zcopy_used;
 
-	/* TODO: check that we are running from vhost_worker? */
-	sock = rcu_dereference_check(vq->private_data, 1);
+	mutex_lock(&vq->mutex);
+	sock = vq->private_data;
 	if (!sock)
-		return;
+		goto out;
 
-	mutex_lock(&vq->mutex);
 	vhost_disable_notify(&net->dev, vq);
 
 	hdr_size = nvq->vhost_hlen;
@@ -461,7 +459,7 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 	}
-
+out:
 	mutex_unlock(&vq->mutex);
 }
 
@@ -570,14 +568,14 @@ static void handle_rx(struct vhost_net *net)
 	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
-	/* TODO: check that we are running from vhost_worker? */
-	struct socket *sock = rcu_dereference_check(vq->private_data, 1);
-
-	if (!sock)
-		return;
+	struct socket *sock;
 
 	mutex_lock(&vq->mutex);
+	sock = vq->private_data;
+	if (!sock)
+		goto out;
 	vhost_disable_notify(&net->dev, vq);
+
 	vhost_hlen = nvq->vhost_hlen;
 	sock_hlen = nvq->sock_hlen;
 
@@ -652,7 +650,7 @@ static void handle_rx(struct vhost_net *net)
 			break;
 		}
 	}
-
+out:
 	mutex_unlock(&vq->mutex);
 }
 
@@ -750,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
 	struct socket *sock;
 
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	if (!sock)
 		return 0;
 
@@ -764,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
 	struct socket *sock;
 
 	mutex_lock(&vq->mutex);
-	sock = rcu_dereference_protected(vq->private_data,
-					 lockdep_is_held(&vq->mutex));
+	sock = vq->private_data;
 	vhost_net_disable_vq(n, vq);
-	rcu_assign_pointer(vq->private_data, NULL);
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return sock;
 }
@@ -923,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	}
 
 	/* start polling new socket */
-	oldsock = rcu_dereference_protected(vq->private_data,
-					    lockdep_is_held(&vq->mutex));
+	oldsock = vq->private_data;
 	if (sock != oldsock) {
 		ubufs = vhost_net_ubuf_alloc(vq,
 					     sock && vhost_sock_zcopy(sock));
@@ -934,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		}
 
 		vhost_net_disable_vq(n, vq);
-		rcu_assign_pointer(vq->private_data, sock);
+		vq->private_data = sock;
 		r = vhost_init_used(vq);
 		if (r)
 			goto err_used;
@@ -968,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	return 0;
 
 err_used:
-	rcu_assign_pointer(vq->private_data, oldsock);
+	vq->private_data = oldsock;
 	vhost_net_enable_vq(n, vq);
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 06adf31a9248..0c27c7df1b09 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -902,19 +902,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	int head, ret;
 	u8 target;
 
+	mutex_lock(&vq->mutex);
 	/*
 	 * We can handle the vq only after the endpoint is setup by calling the
 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
-	 *
-	 * TODO: Check that we are running from vhost_worker which acts
-	 * as read-side critical section for vhost kind of RCU.
-	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
 	 */
-	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	vs_tpg = vq->private_data;
 	if (!vs_tpg)
-		return;
+		goto out;
 
-	mutex_lock(&vq->mutex);
 	vhost_disable_notify(&vs->dev, vq);
 
 	for (;;) {
@@ -1064,6 +1060,7 @@ err_free:
 	vhost_scsi_free_cmd(cmd);
 err_cmd:
 	vhost_scsi_send_bad_target(vs, vq, head, out);
+out:
 	mutex_unlock(&vq->mutex);
 }
 
@@ -1232,9 +1229,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 		       sizeof(vs->vs_vhost_wwpn));
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vq->private_data = vs_tpg;
 			vhost_init_used(vq);
 			mutex_unlock(&vq->mutex);
 		}
@@ -1313,9 +1309,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 	if (match) {
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
-			/* Flushing the vhost_work acts as synchronize_rcu */
 			mutex_lock(&vq->mutex);
-			rcu_assign_pointer(vq->private_data, NULL);
+			vq->private_data = NULL;
 			mutex_unlock(&vq->mutex);
 		}
 	}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a73ea217f24d..339eae85859a 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
-#include <linux/rcupdate.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
 		priv = test ? n : NULL;
 
 		/* start polling new socket */
-		oldpriv = rcu_dereference_protected(vq->private_data,
-						    lockdep_is_held(&vq->mutex));
-		rcu_assign_pointer(vq->private_data, priv);
+		oldpriv = vq->private_data;
+		vq->private_data = priv;
 
 		r = vhost_init_used(&n->vqs[index]);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42298cd23c73..4465ed5f316d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
 	struct iovec iov[UIO_MAXIOV];
 	struct iovec *indirect;
 	struct vring_used_elem *heads;
-	/* We use a kind of RCU to access private pointer.
-	 * All readers access it from worker, which makes it possible to
-	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
-	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-	 * vhost_work execution acts instead of rcu_read_lock() and the end of
-	 * vhost_work execution acts instead of rcu_read_unlock().
-	 * Writers use virtqueue mutex. */
-	void __rcu *private_data;
+	/* Protected by virtqueue mutex. */
+	void *private_data;
 	/* Log write descriptors */
 	void __user *log_base;
 	struct vhost_log *log;
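With the __rcu annotation gone from vhost.h, the writer side follows the same rule: vhost_net_set_backend(), vhost_test_run() and the scsi endpoint ioctls above now install or clear the backend with a plain assignment under vq->mutex. Schematically (vq_swap_backend is an illustrative name, not a helper in the tree):

/* Swap in a new backend and return the old one for the caller to
 * release; both the read and the write happen under vq->mutex.
 */
static void *vq_swap_backend(struct vhost_virtqueue *vq, void *new)
{
        void *old;

        mutex_lock(&vq->mutex);
        old = vq->private_data;
        vq->private_data = new;
        mutex_unlock(&vq->mutex);
        return old;
}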