diff options
author | Michael S. Tsirkin <mst@redhat.com> | 2013-07-09 06:19:18 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-07-28 19:29:55 -0400 |
commit | c23b1ece6112ed0f227fdc881db33c6427b65222 (patch) | |
tree | afe12ef05dbfdbd96e31e233ce78866d1f33081a /drivers/virtio/virtio_ring.c | |
parent | d1a2a1a6efee375e9ac00ec0a718503fd05b4302 (diff) |
virtio: support unlocked queue poll
[ Upstream commit cc229884d3f77ec3b1240e467e0236c3e0647c0c ]
This adds a way to check ring empty state after enable_cb outside any
locks. Will be used by virtio_net.
Note: there's room for more optimization: caller is likely to have a
memory barrier already, which means we might be able to get rid of a
barrier here. Deferring this optimization until we do some
benchmarking.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/virtio/virtio_ring.c')
-rw-r--r-- | drivers/virtio/virtio_ring.c | 56 |
1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5217baf5528c..37d58f84dc50 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq) | |||
607 | EXPORT_SYMBOL_GPL(virtqueue_disable_cb); | 607 | EXPORT_SYMBOL_GPL(virtqueue_disable_cb); |
608 | 608 | ||
609 | /** | 609 | /** |
610 | * virtqueue_enable_cb - restart callbacks after disable_cb. | 610 | * virtqueue_enable_cb_prepare - restart callbacks after disable_cb |
611 | * @vq: the struct virtqueue we're talking about. | 611 | * @vq: the struct virtqueue we're talking about. |
612 | * | 612 | * |
613 | * This re-enables callbacks; it returns "false" if there are pending | 613 | * This re-enables callbacks; it returns current queue state |
614 | * buffers in the queue, to detect a possible race between the driver | 614 | * in an opaque unsigned value. This value should be later tested by |
615 | * checking for more work, and enabling callbacks. | 615 | * virtqueue_poll, to detect a possible race between the driver checking for |
616 | * more work, and enabling callbacks. | ||
616 | * | 617 | * |
617 | * Caller must ensure we don't call this with other virtqueue | 618 | * Caller must ensure we don't call this with other virtqueue |
618 | * operations at the same time (except where noted). | 619 | * operations at the same time (except where noted). |
619 | */ | 620 | */ |
620 | bool virtqueue_enable_cb(struct virtqueue *_vq) | 621 | unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) |
621 | { | 622 | { |
622 | struct vring_virtqueue *vq = to_vvq(_vq); | 623 | struct vring_virtqueue *vq = to_vvq(_vq); |
624 | u16 last_used_idx; | ||
623 | 625 | ||
624 | START_USE(vq); | 626 | START_USE(vq); |
625 | 627 | ||
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) | |||
629 | * either clear the flags bit or point the event index at the next | 631 | * either clear the flags bit or point the event index at the next |
630 | * entry. Always do both to keep code simple. */ | 632 | * entry. Always do both to keep code simple. */ |
631 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; | 633 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; |
632 | vring_used_event(&vq->vring) = vq->last_used_idx; | 634 | vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; |
635 | END_USE(vq); | ||
636 | return last_used_idx; | ||
637 | } | ||
638 | EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); | ||
639 | |||
640 | /** | ||
641 | * virtqueue_poll - query pending used buffers | ||
642 | * @vq: the struct virtqueue we're talking about. | ||
643 | * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). | ||
644 | * | ||
645 | * Returns "true" if there are pending used buffers in the queue. | ||
646 | * | ||
647 | * This does not need to be serialized. | ||
648 | */ | ||
649 | bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) | ||
650 | { | ||
651 | struct vring_virtqueue *vq = to_vvq(_vq); | ||
652 | |||
633 | virtio_mb(vq->weak_barriers); | 653 | virtio_mb(vq->weak_barriers); |
634 | if (unlikely(more_used(vq))) { | 654 | return (u16)last_used_idx != vq->vring.used->idx; |
635 | END_USE(vq); | 655 | } |
636 | return false; | 656 | EXPORT_SYMBOL_GPL(virtqueue_poll); |
637 | } | ||
638 | 657 | ||
639 | END_USE(vq); | 658 | /** |
640 | return true; | 659 | * virtqueue_enable_cb - restart callbacks after disable_cb. |
660 | * @vq: the struct virtqueue we're talking about. | ||
661 | * | ||
662 | * This re-enables callbacks; it returns "false" if there are pending | ||
663 | * buffers in the queue, to detect a possible race between the driver | ||
664 | * checking for more work, and enabling callbacks. | ||
665 | * | ||
666 | * Caller must ensure we don't call this with other virtqueue | ||
667 | * operations at the same time (except where noted). | ||
668 | */ | ||
669 | bool virtqueue_enable_cb(struct virtqueue *_vq) | ||
670 | { | ||
671 | unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); | ||
672 | return !virtqueue_poll(_vq, last_used_idx); | ||
641 | } | 673 | } |
642 | EXPORT_SYMBOL_GPL(virtqueue_enable_cb); | 674 | EXPORT_SYMBOL_GPL(virtqueue_enable_cb); |
643 | 675 | ||