author     Michael S. Tsirkin <mst@redhat.com>    2014-06-05 08:20:23 -0400
committer  Michael S. Tsirkin <mst@redhat.com>    2014-06-09 09:21:06 -0400
commit     ea16c51433510f7f758382dec5b933fc0797f244 (patch)
tree       51f7ff47424fef4d9a8750f92723782e2d17e63a /drivers/vhost
parent     98f9ca0a3faa99b7388578d55eccecf272be4038 (diff)
vhost: move acked_features to VQs
Refactor the code so that features are only accessed under the VQ mutex. This makes everything simpler; there is no need for RCU here anymore.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
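As an illustration of the locking scheme this patch moves to, here is a minimal, self-contained sketch (assumed names, not the actual kernel code): acked_features now lives in each vhost_virtqueue and is written under that virtqueue's mutex, so vhost_has_feature() takes the virtqueue instead of the device and needs no RCU dereference or write barrier. The vhost_has_feature() body matches the new helper in vhost.h below; vhost_sketch_set_features() is a hypothetical helper that only mirrors the per-VQ loop the backends now use.

#include <linux/mutex.h>

/* Sketch only: field layout reduced to what the pattern needs. */
struct vhost_virtqueue {
        struct mutex mutex;
        /* Protected by virtqueue mutex. */
        unsigned acked_features;
};

/* Same shape as the new helper in drivers/vhost/vhost.h: the caller holds
 * vq->mutex, so a plain read of acked_features is enough. */
static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
        return vq->acked_features & (1 << bit);
}

/* Hypothetical helper mirroring what each backend's *_set_features() now
 * does: publish the negotiated features to every VQ under its own mutex,
 * instead of storing them in vhost_dev behind smp_wmb()/RCU. */
static void vhost_sketch_set_features(struct vhost_virtqueue *vqs,
                                      int nvqs, unsigned features)
{
        int i;

        for (i = 0; i < nvqs; i++) {
                mutex_lock(&vqs[i].mutex);
                vqs[i].acked_features = features;
                mutex_unlock(&vqs[i].mutex);
        }
}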
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c     8
-rw-r--r--  drivers/vhost/scsi.c   22
-rw-r--r--  drivers/vhost/test.c    9
-rw-r--r--  drivers/vhost/vhost.c  36
-rw-r--r--  drivers/vhost/vhost.h  11
5 files changed, 44 insertions(+), 42 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e489161d0feb..2bc8f298a4e7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -585,9 +585,9 @@ static void handle_rx(struct vhost_net *net)
         vhost_hlen = nvq->vhost_hlen;
         sock_hlen = nvq->sock_hlen;
 
-        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+        vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
                 vq->log : NULL;
-        mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
+        mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
         while ((sock_len = peek_head_len(sock->sk))) {
                 sock_len += sock_hlen;
@@ -1051,15 +1051,13 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
                 mutex_unlock(&n->dev.mutex);
                 return -EFAULT;
         }
-        n->dev.acked_features = features;
-        smp_wmb();
         for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                 mutex_lock(&n->vqs[i].vq.mutex);
+                n->vqs[i].vq.acked_features = features;
                 n->vqs[i].vhost_hlen = vhost_hlen;
                 n->vqs[i].sock_hlen = sock_hlen;
                 mutex_unlock(&n->vqs[i].vq.mutex);
         }
-        vhost_net_flush(n);
         mutex_unlock(&n->dev.mutex);
         return 0;
 }
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index cf50ce93975b..f1f284fe30fd 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1373,6 +1373,9 @@ err_dev:
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
+        struct vhost_virtqueue *vq;
+        int i;
+
         if (features & ~VHOST_SCSI_FEATURES)
                 return -EOPNOTSUPP;
 
@@ -1382,9 +1385,13 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
                 mutex_unlock(&vs->dev.mutex);
                 return -EFAULT;
         }
-        vs->dev.acked_features = features;
-        smp_wmb();
-        vhost_scsi_flush(vs);
+
+        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+                vq = &vs->vqs[i].vq;
+                mutex_lock(&vq->mutex);
+                vq->acked_features = features;
+                mutex_unlock(&vq->mutex);
+        }
         mutex_unlock(&vs->dev.mutex);
         return 0;
 }
@@ -1591,10 +1598,6 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
                 return;
 
         mutex_lock(&vs->dev.mutex);
-        if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
-                mutex_unlock(&vs->dev.mutex);
-                return;
-        }
 
         if (plug)
                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
@@ -1603,8 +1606,9 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 
         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
         mutex_lock(&vq->mutex);
-        tcm_vhost_send_evt(vs, tpg, lun,
-                        VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
+                tcm_vhost_send_evt(vs, tpg, lun,
+                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
         mutex_unlock(&vq->mutex);
         mutex_unlock(&vs->dev.mutex);
 }
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index c2a54fbf7f99..6fa3bf8bdec7 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -241,15 +241,18 @@ done:
 
 static int vhost_test_set_features(struct vhost_test *n, u64 features)
 {
+        struct vhost_virtqueue *vq;
+
         mutex_lock(&n->dev.mutex);
         if ((features & (1 << VHOST_F_LOG_ALL)) &&
             !vhost_log_access_ok(&n->dev)) {
                 mutex_unlock(&n->dev.mutex);
                 return -EFAULT;
         }
-        n->dev.acked_features = features;
-        smp_wmb();
-        vhost_test_flush(n);
+        vq = &n->vqs[VHOST_TEST_VQ];
+        mutex_lock(&vq->mutex);
+        vq->acked_features = features;
+        mutex_unlock(&vq->mutex);
         mutex_unlock(&n->dev.mutex);
         return 0;
 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1c05e6030d42..a23870cbbf91 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -191,6 +191,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
         vq->log_used = false;
         vq->log_addr = -1ull;
         vq->private_data = NULL;
+        vq->acked_features = 0;
         vq->log_base = NULL;
         vq->error_ctx = NULL;
         vq->error = NULL;
@@ -524,11 +525,13 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 
         for (i = 0; i < d->nvqs; ++i) {
                 int ok;
+                bool log;
+
                 mutex_lock(&d->vqs[i]->mutex);
+                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                 /* If ring is inactive, will check when it's enabled. */
                 if (d->vqs[i]->private_data)
-                        ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
-                                                 log_all);
+                        ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
                 else
                         ok = 1;
                 mutex_unlock(&d->vqs[i]->mutex);
@@ -538,12 +541,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
         return 1;
 }
 
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
+static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         struct vring_desc __user *desc,
                         struct vring_avail __user *avail,
                         struct vring_used __user *used)
 {
-        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
         return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
                access_ok(VERIFY_READ, avail,
                          sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -565,16 +568,16 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+static int vq_log_access_ok(struct vhost_virtqueue *vq,
                             void __user *log_base)
 {
         struct vhost_memory *mp;
-        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
         mp = rcu_dereference_protected(vq->dev->memory,
                                        lockdep_is_held(&vq->mutex));
         return vq_memory_access_ok(log_base, mp,
-                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+                                   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                         sizeof *vq->used +
                                         vq->num * sizeof *vq->used->ring + s));
@@ -584,8 +587,8 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-        return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
-                vq_log_access_ok(vq->dev, vq, vq->log_base);
+        return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+                vq_log_access_ok(vq, vq->log_base);
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
@@ -612,8 +615,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                 return -EFAULT;
         }
 
-        if (!memory_access_ok(d, newmem,
-                              vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+        if (!memory_access_ok(d, newmem, 0)) {
                 kfree(newmem);
                 return -EFAULT;
         }
@@ -726,7 +728,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                  * If it is not, we don't as size might not have been setup.
                  * We will verify when backend is configured. */
                 if (vq->private_data) {
-                        if (!vq_access_ok(d, vq->num,
+                        if (!vq_access_ok(vq, vq->num,
                                 (void __user *)(unsigned long)a.desc_user_addr,
                                 (void __user *)(unsigned long)a.avail_user_addr,
                                 (void __user *)(unsigned long)a.used_user_addr)) {
@@ -866,7 +868,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                         vq = d->vqs[i];
                         mutex_lock(&vq->mutex);
                         /* If ring is inactive, will check when it's enabled. */
-                        if (vq->private_data && !vq_log_access_ok(d, vq, base))
+                        if (vq->private_data && !vq_log_access_ok(vq, base))
                                 r = -EFAULT;
                         else
                                 vq->log_base = base;
@@ -1434,11 +1436,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
          * interrupts. */
         smp_mb();
 
-        if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+        if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
             unlikely(vq->avail_idx == vq->last_avail_idx))
                 return true;
 
-        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                 __u16 flags;
                 if (__get_user(flags, &vq->avail->flags)) {
                         vq_err(vq, "Failed to get flags");
@@ -1499,7 +1501,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                 return false;
         vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
-        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                 r = vhost_update_used_flags(vq);
                 if (r) {
                         vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1536,7 +1538,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
         if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                 return;
         vq->used_flags |= VRING_USED_F_NO_NOTIFY;
-        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                 r = vhost_update_used_flags(vq);
                 if (r)
                         vq_err(vq, "Failed to enable notification at %p: %d\n",
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 35eeb2a1bada..ff454a0ec6f5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -105,6 +105,7 @@ struct vhost_virtqueue {
         struct vring_used_elem *heads;
         /* Protected by virtqueue mutex. */
         void *private_data;
+        unsigned acked_features;
         /* Log write descriptors */
         void __user *log_base;
         struct vhost_log *log;
@@ -117,7 +118,6 @@ struct vhost_dev {
         struct vhost_memory __rcu *memory;
         struct mm_struct *mm;
         struct mutex mutex;
-        unsigned acked_features;
         struct vhost_virtqueue **vqs;
         int nvqs;
         struct file *log_file;
@@ -174,13 +174,8 @@ enum {
                          (1ULL << VHOST_F_LOG_ALL),
 };
 
-static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
 {
-        unsigned acked_features;
-
-        /* TODO: check that we are running from vhost_worker or dev mutex is
-         * held? */
-        acked_features = rcu_dereference_index_check(dev->acked_features, 1);
-        return acked_features & (1 << bit);
+        return vq->acked_features & (1 << bit);
 }
 #endif