author		Michael S. Tsirkin <mst@redhat.com>	2014-06-05 08:20:23 -0400
committer	Michael S. Tsirkin <mst@redhat.com>	2014-06-09 09:21:06 -0400
commit		ea16c51433510f7f758382dec5b933fc0797f244 (patch)
tree		51f7ff47424fef4d9a8750f92723782e2d17e63a /drivers/vhost/vhost.c
parent		98f9ca0a3faa99b7388578d55eccecf272be4038 (diff)
vhost: move acked_features to VQs
Refactor the code so that features are only ever accessed under the
VQ mutex. This makes everything simpler: there is no need for RCU
here anymore.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
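Note: the diffstat below is limited to drivers/vhost/vhost.c, so the
companion drivers/vhost/vhost.h change is not shown here. As a rough
sketch (field layout abbreviated, not part of this diff), the refactor
moves acked_features into struct vhost_virtqueue and reduces
vhost_has_feature() to a plain read under vq->mutex:

/* Sketch only -- the real definitions live in drivers/vhost/vhost.h,
 * which is outside this diffstat. acked_features becomes per-VQ state
 * written under vq->mutex, so readers holding the mutex need no RCU. */
struct vhost_virtqueue {
	struct mutex mutex;	/* protects acked_features and ring state */
	u64 acked_features;	/* feature bits acked for this virtqueue */
	/* ... remaining fields unchanged ... */
};

static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

This is also why vhost_set_memory() in the diff below can pass 0 to
memory_access_ok(): VHOST_F_LOG_ALL is now re-checked per VQ, under
each VQ's mutex, inside the loop.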
Diffstat (limited to 'drivers/vhost/vhost.c')
-rw-r--r--	drivers/vhost/vhost.c	36	+++++++++++++++++++-----------------
1 file changed, 19 insertions, 17 deletions
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1c05e6030d42..a23870cbbf91 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -191,6 +191,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->log_used = false;
 	vq->log_addr = -1ull;
 	vq->private_data = NULL;
+	vq->acked_features = 0;
 	vq->log_base = NULL;
 	vq->error_ctx = NULL;
 	vq->error = NULL;
@@ -524,11 +525,13 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 
 	for (i = 0; i < d->nvqs; ++i) {
 		int ok;
+		bool log;
+
 		mutex_lock(&d->vqs[i]->mutex);
+		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
 		/* If ring is inactive, will check when it's enabled. */
 		if (d->vqs[i]->private_data)
-			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
-						 log_all);
+			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
 		else
 			ok = 1;
 		mutex_unlock(&d->vqs[i]->mutex);
@@ -538,12 +541,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 	return 1;
 }
 
-static int vq_access_ok(struct vhost_dev *d, unsigned int num,
+static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
 			struct vring_desc __user *desc,
 			struct vring_avail __user *avail,
 			struct vring_used __user *used)
 {
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 	       access_ok(VERIFY_READ, avail,
 			 sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -565,16 +568,16 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+static int vq_log_access_ok(struct vhost_virtqueue *vq,
 			    void __user *log_base)
 {
 	struct vhost_memory *mp;
-	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
 	mp = rcu_dereference_protected(vq->dev->memory,
 				       lockdep_is_held(&vq->mutex));
 	return vq_memory_access_ok(log_base, mp,
-				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
 					sizeof *vq->used +
 					vq->num * sizeof *vq->used->ring + s));
@@ -584,8 +587,8 @@ static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
-	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
-		vq_log_access_ok(vq->dev, vq, vq->log_base);
+	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
+		vq_log_access_ok(vq, vq->log_base);
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
@@ -612,8 +615,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 		return -EFAULT;
 	}
 
-	if (!memory_access_ok(d, newmem,
-			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+	if (!memory_access_ok(d, newmem, 0)) {
 		kfree(newmem);
 		return -EFAULT;
 	}
@@ -726,7 +728,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		 * If it is not, we don't as size might not have been setup.
 		 * We will verify when backend is configured. */
 		if (vq->private_data) {
-			if (!vq_access_ok(d, vq->num,
+			if (!vq_access_ok(vq, vq->num,
 				(void __user *)(unsigned long)a.desc_user_addr,
 				(void __user *)(unsigned long)a.avail_user_addr,
 				(void __user *)(unsigned long)a.used_user_addr)) {
@@ -866,7 +868,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 			vq = d->vqs[i];
 			mutex_lock(&vq->mutex);
 			/* If ring is inactive, will check when it's enabled. */
-			if (vq->private_data && !vq_log_access_ok(d, vq, base))
+			if (vq->private_data && !vq_log_access_ok(vq, base))
 				r = -EFAULT;
 			else
 				vq->log_base = base;
@@ -1434,11 +1436,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	 * interrupts. */
 	smp_mb();
 
-	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
 		return true;
 
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__u16 flags;
 		if (__get_user(flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
@@ -1499,7 +1501,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
 		return false;
 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r) {
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
@@ -1536,7 +1538,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
 		return;
 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
-	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r)
 			vq_err(vq, "Failed to enable notification at %p: %d\n",